path: root/arch/powerpc/platforms
author     Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 14:17:05 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-21 14:17:05 -0400
commit     79c4581262e225a7c96d88b632b05ab3b5e9a52c (patch)
tree       8ef030c74ab7e0d0df27cf86195f915efd2832f7 /arch/powerpc/platforms
parent     59534f7298c5e28aaa64e6ed550e247f64ee72ae (diff)
parent     99ec28f183daa450faa7bdad6f932364ae325648 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (92 commits)
  powerpc: Remove unused 'protect4gb' boot parameter
  powerpc: Build-in e1000e for pseries & ppc64_defconfig
  powerpc/pseries: Make request_ras_irqs() available to other pseries code
  powerpc/numa: Use ibm,architecture-vec-5 to detect form 1 affinity
  powerpc/numa: Set a smaller value for RECLAIM_DISTANCE to enable zone reclaim
  powerpc: Use smt_snooze_delay=-1 to always busy loop
  powerpc: Remove check of ibm,smt-snooze-delay OF property
  powerpc/kdump: Fix race in kdump shutdown
  powerpc/kexec: Fix race in kexec shutdown
  powerpc/kexec: Speedup kexec hash PTE tear down
  powerpc/pseries: Add hcall to read 4 ptes at a time in real mode
  powerpc: Use more accurate limit for first segment memory allocations
  powerpc/kdump: Use chip->shutdown to disable IRQs
  powerpc/kdump: CPUs assume the context of the oopsing CPU
  powerpc/crashdump: Do not fail on NULL pointer dereferencing
  powerpc/eeh: Fix oops when probing in early boot
  powerpc/pci: Check devices status property when scanning OF tree
  powerpc/vio: Switch VIO Bus PM to use generic helpers
  powerpc: Avoid bad relocations in iSeries code
  powerpc: Use common cpu_die (fixes SMP+SUSPEND build)
  ...
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--	arch/powerpc/platforms/44x/Kconfig	20
-rw-r--r--	arch/powerpc/platforms/44x/Makefile	1
-rw-r--r--	arch/powerpc/platforms/44x/iss4xx.c	167
-rw-r--r--	arch/powerpc/platforms/83xx/mpc831x_rdb.c	1
-rw-r--r--	arch/powerpc/platforms/83xx/mpc837x_rdb.c	1
-rw-r--r--	arch/powerpc/platforms/86xx/mpc8610_hpcd.c	3
-rw-r--r--	arch/powerpc/platforms/Kconfig.cputype	5
-rw-r--r--	arch/powerpc/platforms/cell/cbe_cpufreq.c	2
-rw-r--r--	arch/powerpc/platforms/iseries/exception.S	4
-rw-r--r--	arch/powerpc/platforms/iseries/pci.c	10
-rw-r--r--	arch/powerpc/platforms/iseries/smp.c	2
-rw-r--r--	arch/powerpc/platforms/pasemi/cpufreq.c	2
-rw-r--r--	arch/powerpc/platforms/powermac/cpufreq_64.c	2
-rw-r--r--	arch/powerpc/platforms/powermac/low_i2c.c	7
-rw-r--r--	arch/powerpc/platforms/powermac/pmac.h	2
-rw-r--r--	arch/powerpc/platforms/powermac/setup.c	13
-rw-r--r--	arch/powerpc/platforms/powermac/smp.c	9
-rw-r--r--	arch/powerpc/platforms/pseries/Makefile	2
-rw-r--r--	arch/powerpc/platforms/pseries/dlpar.c	8
-rw-r--r--	arch/powerpc/platforms/pseries/eeh.c	2
-rw-r--r--	arch/powerpc/platforms/pseries/event_sources.c	79
-rw-r--r--	arch/powerpc/platforms/pseries/hotplug-cpu.c	67
-rw-r--r--	arch/powerpc/platforms/pseries/hvCall.S	38
-rw-r--r--	arch/powerpc/platforms/pseries/lpar.c	33
-rw-r--r--	arch/powerpc/platforms/pseries/plpar_wrappers.h	26
-rw-r--r--	arch/powerpc/platforms/pseries/pseries.h	7
-rw-r--r--	arch/powerpc/platforms/pseries/ras.c	62
-rw-r--r--	arch/powerpc/platforms/pseries/setup.c	10
-rw-r--r--	arch/powerpc/platforms/pseries/smp.c	42
-rw-r--r--	arch/powerpc/platforms/pseries/xics.c	38
30 files changed, 489 insertions(+), 176 deletions(-)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 7486bffd3ebb..eeba0a70e466 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -1,3 +1,12 @@
+config PPC_47x
+	bool "Support for 47x variant"
+	depends on 44x
+	default n
+	select MPIC
+	help
+	  This option enables support for the 47x family of processors and is
+	  not currently compatible with other 44x or 46x varients
+
 config BAMBOO
 	bool "Bamboo"
 	depends on 44x
@@ -151,6 +160,17 @@ config YOSEMITE
 	help
 	  This option enables support for the AMCC PPC440EP evaluation board.
 
+config ISS4xx
+	bool "ISS 4xx Simulator"
+	depends on (44x || 40x)
+	default n
+	select 405GP if 40x
+	select 440GP if 44x && !PPC_47x
+	select PPC_FPU
+	select OF_RTC
+	help
+	  This option enables support for the IBM ISS simulation environment
+
 #config LUAN
 #	bool "Luan"
 #	depends on 44x
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile
index ee6185aeaa3b..82ff326e0795 100644
--- a/arch/powerpc/platforms/44x/Makefile
+++ b/arch/powerpc/platforms/44x/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_SAM440EP) += sam440ep.o
 obj-$(CONFIG_WARP)	+= warp.o
 obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o
 obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o
+obj-$(CONFIG_ISS4xx)	+= iss4xx.o
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
new file mode 100644
index 000000000000..aa46e9d1e771
--- /dev/null
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -0,0 +1,167 @@
+/*
+ * PPC476 board specific routines
+ *
+ * Copyright 2010 Torez Smith, IBM Corporation.
+ *
+ * Based on earlier code:
+ *    Matt Porter <mporter@kernel.crashing.org>
+ *    Copyright 2002-2005 MontaVista Software Inc.
+ *
+ *    Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
+ *    Copyright (c) 2003-2005 Zultys Technologies
+ *
+ *    Rewritten and ported to the merged powerpc tree:
+ *    Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/of_platform.h>
+#include <linux/rtc.h>
+
+#include <asm/machdep.h>
+#include <asm/prom.h>
+#include <asm/udbg.h>
+#include <asm/time.h>
+#include <asm/uic.h>
+#include <asm/ppc4xx.h>
+#include <asm/mpic.h>
+#include <asm/mmu.h>
+
+static __initdata struct of_device_id iss4xx_of_bus[] = {
+	{ .compatible = "ibm,plb4", },
+	{ .compatible = "ibm,plb6", },
+	{ .compatible = "ibm,opb", },
+	{ .compatible = "ibm,ebc", },
+	{},
+};
+
+static int __init iss4xx_device_probe(void)
+{
+	of_platform_bus_probe(NULL, iss4xx_of_bus, NULL);
+	of_instantiate_rtc();
+
+	return 0;
+}
+machine_device_initcall(iss4xx, iss4xx_device_probe);
+
+/* We can have either UICs or MPICs */
+static void __init iss4xx_init_irq(void)
+{
+	struct device_node *np;
+
+	/* Find top level interrupt controller */
+	for_each_node_with_property(np, "interrupt-controller") {
+		if (of_get_property(np, "interrupts", NULL) == NULL)
+			break;
+	}
+	if (np == NULL)
+		panic("Can't find top level interrupt controller");
+
+	/* Check type and do appropriate initialization */
+	if (of_device_is_compatible(np, "ibm,uic")) {
+		uic_init_tree();
+		ppc_md.get_irq = uic_get_irq;
+#ifdef CONFIG_MPIC
+	} else if (of_device_is_compatible(np, "chrp,open-pic")) {
+		/* The MPIC driver will get everything it needs from the
+		 * device-tree, just pass 0 to all arguments
+		 */
+		struct mpic *mpic = mpic_alloc(np, 0, MPIC_PRIMARY, 0, 0,
+					       " MPIC ");
+		BUG_ON(mpic == NULL);
+		mpic_init(mpic);
+		ppc_md.get_irq = mpic_get_irq;
+#endif
+	} else
+		panic("Unrecognized top level interrupt controller");
+}
+
+#ifdef CONFIG_SMP
+static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+{
+	mpic_setup_this_cpu();
+}
+
+static void __cpuinit smp_iss4xx_kick_cpu(int cpu)
+{
+	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
+	const u64 *spin_table_addr_prop;
+	u32 *spin_table;
+	extern void start_secondary_47x(void);
+
+	BUG_ON(cpunode == NULL);
+
+	/* Assume spin table. We could test for the enable-method in
+	 * the device-tree but currently there's little point as it's
+	 * our only supported method
+	 */
+	spin_table_addr_prop = of_get_property(cpunode, "cpu-release-addr",
+					       NULL);
+	if (spin_table_addr_prop == NULL) {
+		pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu);
+		return;
+	}
+
+	/* Assume it's mapped as part of the linear mapping. This is a bit
+	 * fishy but will work fine for now
+	 */
+	spin_table = (u32 *)__va(*spin_table_addr_prop);
+	pr_debug("CPU%d: Spin table mapped at %p\n", cpu, spin_table);
+
+	spin_table[3] = cpu;
+	smp_wmb();
+	spin_table[1] = __pa(start_secondary_47x);
+	mb();
+}
+
+static struct smp_ops_t iss_smp_ops = {
+	.probe		= smp_mpic_probe,
+	.message_pass	= smp_mpic_message_pass,
+	.setup_cpu	= smp_iss4xx_setup_cpu,
+	.kick_cpu	= smp_iss4xx_kick_cpu,
+	.give_timebase	= smp_generic_give_timebase,
+	.take_timebase	= smp_generic_take_timebase,
+};
+
+static void __init iss4xx_smp_init(void)
+{
+	if (mmu_has_feature(MMU_FTR_TYPE_47x))
+		smp_ops = &iss_smp_ops;
+}
+
+#else /* CONFIG_SMP */
+static void __init iss4xx_smp_init(void) { }
+#endif /* CONFIG_SMP */
+
+static void __init iss4xx_setup_arch(void)
+{
+	iss4xx_smp_init();
+}
+
+/*
+ * Called very early, MMU is off, device-tree isn't unflattened
+ */
+static int __init iss4xx_probe(void)
+{
+	unsigned long root = of_get_flat_dt_root();
+
+	if (!of_flat_dt_is_compatible(root, "ibm,iss-4xx"))
+		return 0;
+
+	return 1;
+}
+
+define_machine(iss4xx) {
+	.name			= "ISS-4xx",
+	.probe			= iss4xx_probe,
+	.progress		= udbg_progress,
+	.init_IRQ		= iss4xx_init_irq,
+	.setup_arch		= iss4xx_setup_arch,
+	.restart		= ppc4xx_reset_system,
+	.calibrate_decr		= generic_calibrate_decr,
+};
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
index 0b4f883b20eb..ae525e4745d2 100644
--- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c
@@ -74,6 +74,7 @@ static int __init mpc831x_rdb_probe(void)
 static struct of_device_id __initdata of_bus_ids[] = {
 	{ .compatible = "simple-bus" },
 	{ .compatible = "gianfar" },
+	{ .compatible = "gpio-leds", },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
index a1908d261240..e00801c42540 100644
--- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c
+++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c
@@ -72,6 +72,7 @@ static struct of_device_id mpc837x_ids[] = {
 	{ .compatible = "soc", },
 	{ .compatible = "simple-bus", },
 	{ .compatible = "gianfar", },
+	{ .compatible = "gpio-leds", },
 	{},
 };
 
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
index 5abe137f6309..018cc67be426 100644
--- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c
@@ -83,7 +83,8 @@ static struct of_device_id __initdata mpc8610_ids[] = {
 	{ .compatible = "fsl,mpc8610-immr", },
 	{ .compatible = "fsl,mpc8610-guts", },
 	{ .compatible = "simple-bus", },
-	{ .compatible = "gianfar", },
+	/* So that the DMA channel nodes can be probed individually: */
+	{ .compatible = "fsl,eloplus-dma", },
 	{}
 };
 
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index a8aae0b54579..d361f8119b1e 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -43,7 +43,7 @@ config 40x
 	select PPC_PCI_CHOICE
 
 config 44x
-	bool "AMCC 44x"
+	bool "AMCC 44x, 46x or 47x"
 	select PPC_DCR_NATIVE
 	select PPC_UDBG_16550
 	select 4xx_SOC
@@ -294,7 +294,7 @@ config PPC_PERF_CTRS
 	  This enables the powerpc-specific perf_event back-end.
 
 config SMP
-	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE
+	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
 	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -322,6 +322,7 @@ config NR_CPUS
 config NOT_COHERENT_CACHE
 	bool
 	depends on 4xx || 8xx || E200 || PPC_MPC512x || GAMECUBE_COMMON
+	default n if PPC_47x
 	default y
 
 config CHECK_CACHE_COHERENCY
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index e6506cd0ff94..bfa2c0cb3d1e 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -118,7 +118,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-	cpumask_copy(policy->cpus, &per_cpu(cpu_sibling_map, policy->cpu));
+	cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
 #endif
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
index fba5bf915073..32a56c6dfa72 100644
--- a/arch/powerpc/platforms/iseries/exception.S
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -252,8 +252,8 @@ decrementer_iSeries_masked:
 	li	r11,1
 	ld	r12,PACALPPACAPTR(r13)
 	stb	r11,LPPACADECRINT(r12)
-	LOAD_REG_IMMEDIATE(r12, tb_ticks_per_jiffy)
-	lwz	r12,0(r12)
+	li	r12,-1
+	clrldi	r12,r12,33	/* set DEC to 0x7fffffff */
 	mtspr	SPRN_DEC,r12
 	/* fall through */
 
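Note on the new DEC value: li r12,-1 loads all ones and clrldi r12,r12,33 clears the upper 33 bits, leaving 0x7fffffff, the largest positive 32-bit decrementer value. A quick host-side check of that arithmetic (illustrative sketch, not part of the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t r12 = (uint64_t)-1;		/* li     r12,-1     */
	r12 &= ((uint64_t)1 << 31) - 1;		/* clrldi r12,r12,33 */
	assert(r12 == 0x7fffffff);		/* value written to SPRN_DEC */
	return 0;
}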
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
index b841c9a9db87..3fc2e6494b8b 100644
--- a/arch/powerpc/platforms/iseries/pci.c
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/of.h>
+#include <linux/ratelimit.h>
 
 #include <asm/types.h>
 #include <asm/io.h>
@@ -584,14 +585,9 @@ static inline struct device_node *xlate_iomm_address(
 
 	orig_addr = (unsigned long __force)addr;
 	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
-		static unsigned long last_jiffies;
-		static int num_printed;
+		static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
 
-		if (time_after(jiffies, last_jiffies + 60 * HZ)) {
-			last_jiffies = jiffies;
-			num_printed = 0;
-		}
-		if (num_printed++ < 10)
+		if (__ratelimit(&ratelimit))
 			printk(KERN_ERR
 			       "iSeries_%s: invalid access at IO address %p\n",
 			       func, addr);
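The hunk above replaces hand-rolled jiffies bookkeeping with the stock helpers from <linux/ratelimit.h>. A minimal sketch of the same idiom (function and message are illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/ratelimit.h>

static void report_bad_io_access(const char *func, void *addr)
{
	/* At most 10 messages per 60*HZ window, matching the patch above */
	static DEFINE_RATELIMIT_STATE(rs, 60 * HZ, 10);

	if (__ratelimit(&rs))
		printk(KERN_ERR "%s: invalid access at IO address %p\n",
		       func, addr);
}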
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
index 722335e32fd4..6590850045af 100644
--- a/arch/powerpc/platforms/iseries/smp.c
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -83,7 +83,7 @@ static void smp_iSeries_message_pass(int target, int msg)
 
 static int smp_iSeries_probe(void)
 {
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 static void smp_iSeries_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pasemi/cpufreq.c b/arch/powerpc/platforms/pasemi/cpufreq.c
index d35e0520abf0..c16537bc0c6e 100644
--- a/arch/powerpc/platforms/pasemi/cpufreq.c
+++ b/arch/powerpc/platforms/pasemi/cpufreq.c
@@ -213,7 +213,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	pr_debug("current astate is at %d\n",cur_astate);
 
 	policy->cur = pas_freqs[cur_astate].frequency;
-	cpumask_copy(policy->cpus, &cpu_online_map);
+	cpumask_copy(policy->cpus, cpu_online_mask);
 
 	ppc_proc_freq = policy->cur * 1000ul;
 
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c
index 3ca09d3ccce3..9650c6029c82 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_64.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_64.c
@@ -362,7 +362,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	/* secondary CPUs are tied to the primary one by the
 	 * cpufreq core if in the secondary policy we tell it that
 	 * it actually must be one policy together with all others. */
-	cpumask_copy(policy->cpus, &cpu_online_map);
+	cpumask_copy(policy->cpus, cpu_online_mask);
 	cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu);
 
 	return cpufreq_frequency_table_cpuinfo(policy,
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c
index f45331ab97cb..06a137c5b8bb 100644
--- a/arch/powerpc/platforms/powermac/low_i2c.c
+++ b/arch/powerpc/platforms/powermac/low_i2c.c
@@ -592,7 +592,7 @@ static void __init kw_i2c_probe(void)
 	/* Probe keywest-i2c busses */
 	for_each_compatible_node(np, "i2c","keywest-i2c") {
 		struct pmac_i2c_host_kw *host;
-		int multibus, chans, i;
+		int multibus;
 
 		/* Found one, init a host structure */
 		host = kw_i2c_host_init(np);
@@ -614,6 +614,8 @@ static void __init kw_i2c_probe(void)
 		 * parent type
 		 */
 		if (multibus) {
+			int chans, i;
+
 			parent = of_get_parent(np);
 			if (parent == NULL)
 				continue;
@@ -1258,8 +1260,7 @@ static void pmac_i2c_do_end(struct pmf_function *func, void *instdata)
 	if (inst == NULL)
 		return;
 	pmac_i2c_close(inst->bus);
-	if (inst)
-		kfree(inst);
+	kfree(inst);
 }
 
 static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len)
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 3362e781b6a7..f0bc08f6c1f0 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -33,6 +33,8 @@ extern void pmac_setup_pci_dma(void);
 extern void pmac_check_ht_link(void);
 
 extern void pmac_setup_smp(void);
+extern void pmac32_cpu_die(void);
+extern void low_cpu_die(void) __attribute__((noreturn));
 
 extern int pmac_nvram_init(void);
 extern void pmac_pic_init(void);
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 15c2241f9c72..f1d0132ebcc7 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -480,7 +480,7 @@ static void __init pmac_init_early(void)
 #endif
 
 	/* SMP Init has to be done early as we need to patch up
-	 * cpu_possible_map before interrupt stacks are allocated
+	 * cpu_possible_mask before interrupt stacks are allocated
 	 * or kaboom...
 	 */
 #ifdef CONFIG_SMP
@@ -646,7 +646,7 @@ static int pmac_pci_probe_mode(struct pci_bus *bus)
 /* access per cpu vars from generic smp.c */
 DECLARE_PER_CPU(int, cpu_state);
 
-static void pmac_cpu_die(void)
+static void pmac64_cpu_die(void)
 {
 	/*
 	 * turn off as much as possible, we'll be
@@ -717,8 +717,13 @@ define_machine(powermac) {
 	.pcibios_after_init	= pmac_pcibios_after_init,
 	.phys_mem_access_prot	= pci_phys_mem_access_prot,
 #endif
-#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC64)
-	.cpu_die		= pmac_cpu_die,
+#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PPC64
+	.cpu_die		= pmac64_cpu_die,
+#endif
+#ifdef CONFIG_PPC32
+	.cpu_die		= pmac32_cpu_die,
+#endif
 #endif
 #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
 	.cpu_die		= generic_mach_cpu_die,
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 6898e8241cd0..c95215f4f8b6 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -53,6 +53,8 @@
 #include <asm/pmac_low_i2c.h>
 #include <asm/pmac_pfunc.h>
 
+#include "pmac.h"
+
 #undef DEBUG
 
 #ifdef DEBUG
@@ -315,7 +317,7 @@ static int __init smp_psurge_probe(void)
 	/* This is necessary because OF doesn't know about the
 	 * secondary cpu(s), and thus there aren't nodes in the
 	 * device tree for them, and smp_setup_cpu_maps hasn't
-	 * set their bits in cpu_present_map.
+	 * set their bits in cpu_present_mask.
 	 */
 	if (ncpus > NR_CPUS)
 		ncpus = NR_CPUS;
@@ -878,10 +880,9 @@ int smp_core99_cpu_disable(void)
 	return 0;
 }
 
-extern void low_cpu_die(void) __attribute__((noreturn)); /* in sleep.S */
 static int cpu_dead[NR_CPUS];
 
-void cpu_die(void)
+void pmac32_cpu_die(void)
 {
 	local_irq_disable();
 	cpu_dead[smp_processor_id()] = 1;
@@ -944,7 +945,7 @@ void __init pmac_setup_smp(void)
 	}
 #ifdef CONFIG_PPC32
 	else {
-		/* We have to set bits in cpu_possible_map here since the
+		/* We have to set bits in cpu_possible_mask here since the
 		 * secondary CPU(s) aren't in the device tree. Various
 		 * things won't be initialized for CPUs not in the possible
 		 * map, so we really need to fix it up here.
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 0ff5174ae4f5..3dbef309bc8d 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -7,7 +7,7 @@ EXTRA_CFLAGS += -DDEBUG
 endif
 
 obj-y			:= lpar.o hvCall.o nvram.o reconfig.o \
-			   setup.o iommu.o ras.o \
+			   setup.o iommu.o event_sources.o ras.o \
 			   firmware.o power.o dlpar.o
 obj-$(CONFIG_SMP)	+= smp.o
 obj-$(CONFIG_XICS)	+= xics.o
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index e1682bc168a3..d71e58584086 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -79,13 +79,12 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa)
 	 * prepend this to the full_name.
 	 */
 	name = (char *)ccwa + ccwa->name_offset;
-	dn->full_name = kmalloc(strlen(name) + 2, GFP_KERNEL);
+	dn->full_name = kasprintf(GFP_KERNEL, "/%s", name);
 	if (!dn->full_name) {
 		kfree(dn);
 		return NULL;
 	}
 
-	sprintf(dn->full_name, "/%s", name);
 	return dn;
 }
 
@@ -410,15 +409,13 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 	 * directory of the device tree. CPUs actually live in the
 	 * cpus directory so we need to fixup the full_name.
 	 */
-	cpu_name = kzalloc(strlen(dn->full_name) + strlen("/cpus") + 1,
-			   GFP_KERNEL);
+	cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
 	if (!cpu_name) {
 		dlpar_free_cc_nodes(dn);
 		rc = -ENOMEM;
 		goto out;
 	}
 
-	sprintf(cpu_name, "/cpus%s", dn->full_name);
 	kfree(dn->full_name);
 	dn->full_name = cpu_name;
 
@@ -433,6 +430,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
 	if (rc) {
 		dlpar_release_drc(drc_index);
 		dlpar_free_cc_nodes(dn);
+		goto out;
 	}
 
 	rc = dlpar_online_cpu(dn);
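Both dlpar.c hunks above replace a kmalloc/kzalloc-plus-sprintf pair with kasprintf(), which sizes and formats the string in a single allocation. A minimal sketch of the idiom (helper name is illustrative, not from the patch):

#include <linux/kernel.h>
#include <linux/slab.h>

/* Build "/cpus<suffix>" in one allocation; caller kfree()s the result. */
static char *make_cpus_path(const char *suffix)
{
	return kasprintf(GFP_KERNEL, "/cpus%s", suffix);	/* NULL on allocation failure */
}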
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 7df7fbb7cacb..34b7dc12e731 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -749,7 +749,7 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
 	/* Determine type of EEH reset required by device,
 	 * default hot reset or fundamental reset
 	 */
-	if (dev->needs_freset)
+	if (dev && dev->needs_freset)
 		rtas_pci_slot_reset(pdn, 3);
 	else
 		rtas_pci_slot_reset(pdn, 1);
diff --git a/arch/powerpc/platforms/pseries/event_sources.c b/arch/powerpc/platforms/pseries/event_sources.c
new file mode 100644
index 000000000000..e889c9d9586a
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/event_sources.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2001 Dave Engebretsen IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <asm/prom.h>
+
+#include "pseries.h"
+
+void request_event_sources_irqs(struct device_node *np,
+				irq_handler_t handler,
+				const char *name)
+{
+	int i, index, count = 0;
+	struct of_irq oirq;
+	const u32 *opicprop;
+	unsigned int opicplen;
+	unsigned int virqs[16];
+
+	/* Check for obsolete "open-pic-interrupt" property. If present, then
+	 * map those interrupts using the default interrupt host and default
+	 * trigger
+	 */
+	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
+	if (opicprop) {
+		opicplen /= sizeof(u32);
+		for (i = 0; i < opicplen; i++) {
+			if (count > 15)
+				break;
+			virqs[count] = irq_create_mapping(NULL, *(opicprop++));
+			if (virqs[count] == NO_IRQ)
+				printk(KERN_ERR "Unable to allocate interrupt "
+				       "number for %s\n", np->full_name);
+			else
+				count++;
+
+		}
+	}
+	/* Else use normal interrupt tree parsing */
+	else {
+		/* First try to do a proper OF tree parsing */
+		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
+		     index++) {
+			if (count > 15)
+				break;
+			virqs[count] = irq_create_of_mapping(oirq.controller,
+							     oirq.specifier,
+							     oirq.size);
+			if (virqs[count] == NO_IRQ)
+				printk(KERN_ERR "Unable to allocate interrupt "
+				       "number for %s\n", np->full_name);
+			else
+				count++;
+		}
+	}
+
+	/* Now request them */
+	for (i = 0; i < count; i++) {
+		if (request_irq(virqs[i], handler, 0, name, NULL)) {
+			printk(KERN_ERR "Unable to request interrupt %d for "
+			       "%s\n", virqs[i], np->full_name);
+			return;
+		}
+	}
+}
+
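This new file is the former request_ras_irqs() from ras.c, moved so other pseries code can share it. Callers hand it an /event-sources node and an irq handler, as the updated init_ras_IRQ() does in the ras.c hunk further down; a condensed sketch of that call pattern (mirrors the existing code, nothing new):

	np = of_find_node_by_path("/event-sources/epow-events");
	if (np != NULL) {
		request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
		of_node_put(np);
	}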
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index a8e1d5d17a28..8f85f399ab9f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -154,30 +154,6 @@ static void pseries_mach_cpu_die(void)
 	for(;;);
 }
 
-static int qcss_tok;	/* query-cpu-stopped-state token */
-
-/* Get state of physical CPU.
- * Return codes:
- *	0	- The processor is in the RTAS stopped state
- *	1	- stop-self is in progress
- *	2	- The processor is not in the RTAS stopped state
- *	-1	- Hardware Error
- *	-2	- Hardware Busy, Try again later.
- */
-static int query_cpu_stopped(unsigned int pcpu)
-{
-	int cpu_status, status;
-
-	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
-	if (status != 0) {
-		printk(KERN_ERR
-		       "RTAS query-cpu-stopped-state failed: %i\n", status);
-		return status;
-	}
-
-	return cpu_status;
-}
-
 static int pseries_cpu_disable(void)
 {
 	int cpu = smp_processor_id();
@@ -187,7 +163,7 @@ static int pseries_cpu_disable(void)
 
 	/*fix boot_cpuid here*/
 	if (cpu == boot_cpuid)
-		boot_cpuid = any_online_cpu(cpu_online_map);
+		boot_cpuid = cpumask_any(cpu_online_mask);
 
 	/* FIXME: abstract this to not be platform specific later on */
 	xics_migrate_irqs_away();
@@ -224,8 +200,9 @@ static void pseries_cpu_die(unsigned int cpu)
 	} else if (get_preferred_offline_state(cpu) == CPU_STATE_OFFLINE) {
 
 		for (tries = 0; tries < 25; tries++) {
-			cpu_status = query_cpu_stopped(pcpu);
-			if (cpu_status == 0 || cpu_status == -1)
+			cpu_status = smp_query_cpu_stopped(pcpu);
+			if (cpu_status == QCSS_STOPPED ||
+			    cpu_status == QCSS_HARDWARE_ERROR)
 				break;
 			cpu_relax();
 		}
@@ -245,7 +222,7 @@ static void pseries_cpu_die(unsigned int cpu)
 }
 
 /*
- * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
+ * Update cpu_present_mask and paca(s) for a new cpu node.  The wrinkle
  * here is that a cpu device node may represent up to two logical cpus
  * in the SMT case.  We must honor the assumption in other code that
  * the logical ids for sibling SMT threads x and y are adjacent, such
@@ -254,7 +231,7 @@ static void pseries_cpu_die(unsigned int cpu)
 static int pseries_add_processor(struct device_node *np)
 {
 	unsigned int cpu;
-	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
+	cpumask_var_t candidate_mask, tmp;
 	int err = -ENOSPC, len, nthreads, i;
 	const u32 *intserv;
 
@@ -262,48 +239,53 @@ static int pseries_add_processor(struct device_node *np)
 	if (!intserv)
 		return 0;
 
+	zalloc_cpumask_var(&candidate_mask, GFP_KERNEL);
+	zalloc_cpumask_var(&tmp, GFP_KERNEL);
+
 	nthreads = len / sizeof(u32);
 	for (i = 0; i < nthreads; i++)
-		cpu_set(i, tmp);
+		cpumask_set_cpu(i, tmp);
 
 	cpu_maps_update_begin();
 
-	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
+	BUG_ON(!cpumask_subset(cpu_present_mask, cpu_possible_mask));
 
 	/* Get a bitmap of unoccupied slots. */
-	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
-	if (cpus_empty(candidate_map)) {
+	cpumask_xor(candidate_mask, cpu_possible_mask, cpu_present_mask);
+	if (cpumask_empty(candidate_mask)) {
 		/* If we get here, it most likely means that NR_CPUS is
 		 * less than the partition's max processors setting.
 		 */
 		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
 		       " supports %d logical cpus.\n", np->full_name,
-		       cpus_weight(cpu_possible_map));
+		       cpumask_weight(cpu_possible_mask));
 		goto out_unlock;
 	}
 
-	while (!cpus_empty(tmp))
-		if (cpus_subset(tmp, candidate_map))
+	while (!cpumask_empty(tmp))
+		if (cpumask_subset(tmp, candidate_mask))
 			/* Found a range where we can insert the new cpu(s) */
 			break;
 		else
-			cpus_shift_left(tmp, tmp, nthreads);
+			cpumask_shift_left(tmp, tmp, nthreads);
 
-	if (cpus_empty(tmp)) {
-		printk(KERN_ERR "Unable to find space in cpu_present_map for"
+	if (cpumask_empty(tmp)) {
+		printk(KERN_ERR "Unable to find space in cpu_present_mask for"
 		       " processor %s with %d thread(s)\n", np->name,
 		       nthreads);
 		goto out_unlock;
 	}
 
-	for_each_cpu_mask(cpu, tmp) {
-		BUG_ON(cpu_isset(cpu, cpu_present_map));
+	for_each_cpu(cpu, tmp) {
+		BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask));
 		set_cpu_present(cpu, true);
 		set_hard_smp_processor_id(cpu, *intserv++);
 	}
 	err = 0;
 out_unlock:
 	cpu_maps_update_done();
+	free_cpumask_var(candidate_mask);
+	free_cpumask_var(tmp);
 	return err;
 }
 
@@ -334,7 +316,7 @@ static void pseries_remove_processor(struct device_node *np)
 		set_hard_smp_processor_id(cpu, -1);
 		break;
 	}
-	if (cpu == NR_CPUS)
+	if (cpu >= nr_cpu_ids)
 		printk(KERN_WARNING "Could not find cpu to remove "
 		       "with physical id 0x%x\n", intserv[i]);
 }
@@ -388,6 +370,7 @@ static int __init pseries_cpu_hotplug_init(void)
 	struct device_node *np;
 	const char *typep;
 	int cpu;
+	int qcss_tok;
 
 	for_each_node_by_name(np, "interrupt-controller") {
 		typep = of_get_property(np, "compatible", NULL);
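The pseries_add_processor() change above moves from on-stack cpumask_t values to cpumask_var_t with explicit allocation and freeing, which keeps large NR_CPUS masks off the stack when CONFIG_CPUMASK_OFFSTACK=y. A minimal sketch of that allocate/use/free pattern (illustrative function, not from the patch):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/slab.h>

static int count_not_yet_online(void)
{
	cpumask_var_t mask;
	int n;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* possible but not online */
	cpumask_andnot(mask, cpu_possible_mask, cpu_online_mask);
	n = cpumask_weight(mask);

	free_cpumask_var(mask);
	return n;
}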
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 383a5d0e9818..48d20573e4de 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -228,3 +228,41 @@ _GLOBAL(plpar_hcall9)
 	mtcrf	0xff,r0
 
 	blr				/* return r3 = status */
+
+/* See plpar_hcall_raw to see why this is needed */
+_GLOBAL(plpar_hcall9_raw)
+	HMT_MEDIUM
+
+	mfcr	r0
+	stw	r0,8(r1)
+
+	std	r4,STK_PARM(r4)(r1)	/* Save ret buffer */
+
+	mr	r4,r5
+	mr	r5,r6
+	mr	r6,r7
+	mr	r7,r8
+	mr	r8,r9
+	mr	r9,r10
+	ld	r10,STK_PARM(r11)(r1)	/* put arg7 in R10 */
+	ld	r11,STK_PARM(r12)(r1)	/* put arg8 in R11 */
+	ld	r12,STK_PARM(r13)(r1)	/* put arg9 in R12 */
+
+	HVSC				/* invoke the hypervisor */
+
+	mr	r0,r12
+	ld	r12,STK_PARM(r4)(r1)
+	std	r4,  0(r12)
+	std	r5,  8(r12)
+	std	r6, 16(r12)
+	std	r7, 24(r12)
+	std	r8, 32(r12)
+	std	r9, 40(r12)
+	std	r10,48(r12)
+	std	r11,56(r12)
+	std	r0, 64(r12)
+
+	lwz	r0,8(r1)
+	mtcrf	0xff,r0
+
+	blr				/* return r3 = status */
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 0707653612ba..cf79b46d8f88 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -367,21 +367,28 @@ static void pSeries_lpar_hptab_clear(void)
 {
 	unsigned long size_bytes = 1UL << ppc64_pft_size;
 	unsigned long hpte_count = size_bytes >> 4;
-	unsigned long dummy1, dummy2, dword0;
+	struct {
+		unsigned long pteh;
+		unsigned long ptel;
+	} ptes[4];
 	long lpar_rc;
-	int i;
+	int i, j;
 
-	/* TODO: Use bulk call */
-	for (i = 0; i < hpte_count; i++) {
-		/* dont remove HPTEs with VRMA mappings */
-		lpar_rc = plpar_pte_remove_raw(H_ANDCOND, i, HPTE_V_1TB_SEG,
-						&dummy1, &dummy2);
-		if (lpar_rc == H_NOT_FOUND) {
-			lpar_rc = plpar_pte_read_raw(0, i, &dword0, &dummy1);
-			if (!lpar_rc && ((dword0 & HPTE_V_VRMA_MASK)
-				!= HPTE_V_VRMA_MASK))
-				/* Can be hpte for 1TB Seg. So remove it */
-				plpar_pte_remove_raw(0, i, 0, &dummy1, &dummy2);
+	/* Read in batches of 4,
+	 * invalidate only valid entries not in the VRMA
+	 * hpte_count will be a multiple of 4
+	 */
+	for (i = 0; i < hpte_count; i += 4) {
+		lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes);
+		if (lpar_rc != H_SUCCESS)
+			continue;
+		for (j = 0; j < 4; j++){
+			if ((ptes[j].pteh & HPTE_V_VRMA_MASK) ==
+				HPTE_V_VRMA_MASK)
+				continue;
+			if (ptes[j].pteh & HPTE_V_VALID)
+				plpar_pte_remove_raw(0, i + j, 0,
+					&(ptes[j].pteh), &(ptes[j].ptel));
 		}
 	}
 }
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index a05f8d427856..d9801117124b 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -4,6 +4,14 @@
 #include <asm/hvcall.h>
 #include <asm/page.h>
 
+/* Get state of physical CPU from query_cpu_stopped */
+int smp_query_cpu_stopped(unsigned int pcpu);
+#define QCSS_STOPPED 0
+#define QCSS_STOPPING 1
+#define QCSS_NOT_STOPPED 2
+#define QCSS_HARDWARE_ERROR -1
+#define QCSS_HARDWARE_BUSY -2
+
 static inline long poll_pending(void)
 {
 	return plpar_hcall_norets(H_POLL_PENDING);
@@ -183,6 +191,24 @@ static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
 	return rc;
 }
 
+/*
+ * plpar_pte_read_4_raw can be called in real mode.
+ * ptes must be 8*sizeof(unsigned long)
+ */
+static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
+					unsigned long *ptes)
+
+{
+	long rc;
+	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
+
+	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
+
+	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
+
+	return rc;
+}
+
 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
 				     unsigned long avpn)
 {
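plpar_hcall9_raw() returns up to nine values in retbuf; with H_READ | H_READ_4 the first eight form four (pteh, ptel) pairs, which is why the wrapper copies 8*sizeof(unsigned long) into the caller's buffer. A sketch of a caller decoding one batch, along the lines of the pSeries_lpar_hptab_clear() change above (helper name is illustrative):

static int count_valid_hptes_in_group(unsigned long ptex)
{
	struct {
		unsigned long pteh;
		unsigned long ptel;
	} ptes[4];			/* exactly 8 unsigned longs */
	int j, valid = 0;

	if (plpar_pte_read_4_raw(0, ptex, (void *)ptes) != H_SUCCESS)
		return 0;

	for (j = 0; j < 4; j++)
		if (ptes[j].pteh & HPTE_V_VALID)
			valid++;

	return valid;
}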
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 9e17c0d2a0c8..40c93cad91d2 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -10,6 +10,13 @@
 #ifndef _PSERIES_PSERIES_H
 #define _PSERIES_PSERIES_H
 
+#include <linux/interrupt.h>
+
+struct device_node;
+
+extern void request_event_sources_irqs(struct device_node *np,
+				       irq_handler_t handler, const char *name);
+
 extern void __init fw_feature_init(const char *hypertas, unsigned long len);
 
 struct pt_regs;
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index db940d2c39a0..41a3e9a039ed 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -67,63 +67,6 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);
 
 
-static void request_ras_irqs(struct device_node *np,
-			irq_handler_t handler,
-			const char *name)
-{
-	int i, index, count = 0;
-	struct of_irq oirq;
-	const u32 *opicprop;
-	unsigned int opicplen;
-	unsigned int virqs[16];
-
-	/* Check for obsolete "open-pic-interrupt" property. If present, then
-	 * map those interrupts using the default interrupt host and default
-	 * trigger
-	 */
-	opicprop = of_get_property(np, "open-pic-interrupt", &opicplen);
-	if (opicprop) {
-		opicplen /= sizeof(u32);
-		for (i = 0; i < opicplen; i++) {
-			if (count > 15)
-				break;
-			virqs[count] = irq_create_mapping(NULL, *(opicprop++));
-			if (virqs[count] == NO_IRQ)
-				printk(KERN_ERR "Unable to allocate interrupt "
-				       "number for %s\n", np->full_name);
-			else
-				count++;
-
-		}
-	}
-	/* Else use normal interrupt tree parsing */
-	else {
-		/* First try to do a proper OF tree parsing */
-		for (index = 0; of_irq_map_one(np, index, &oirq) == 0;
-		     index++) {
-			if (count > 15)
-				break;
-			virqs[count] = irq_create_of_mapping(oirq.controller,
-							     oirq.specifier,
-							     oirq.size);
-			if (virqs[count] == NO_IRQ)
-				printk(KERN_ERR "Unable to allocate interrupt "
-				       "number for %s\n", np->full_name);
-			else
-				count++;
-		}
-	}
-
-	/* Now request them */
-	for (i = 0; i < count; i++) {
-		if (request_irq(virqs[i], handler, 0, name, NULL)) {
-			printk(KERN_ERR "Unable to request interrupt %d for "
-			       "%s\n", virqs[i], np->full_name);
-			return;
-		}
-	}
-}
-
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -138,14 +81,15 @@ static int __init init_ras_IRQ(void)
 	/* Internal Errors */
 	np = of_find_node_by_path("/event-sources/internal-errors");
 	if (np != NULL) {
-		request_ras_irqs(np, ras_error_interrupt, "RAS_ERROR");
+		request_event_sources_irqs(np, ras_error_interrupt,
+					   "RAS_ERROR");
 		of_node_put(np);
 	}
 
 	/* EPOW Events */
 	np = of_find_node_by_path("/event-sources/epow-events");
 	if (np != NULL) {
-		request_ras_irqs(np, ras_epow_interrupt, "RAS_EPOW");
+		request_event_sources_irqs(np, ras_epow_interrupt, "RAS_EPOW");
 		of_node_put(np);
 	}
 
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 6710761bf60f..a6d19e3a505e 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -496,13 +496,14 @@ static int __init pSeries_probe(void)
 }
 
 
-DECLARE_PER_CPU(unsigned long, smt_snooze_delay);
+DECLARE_PER_CPU(long, smt_snooze_delay);
 
 static void pseries_dedicated_idle_sleep(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned long start_snooze;
 	unsigned long in_purr, out_purr;
+	long snooze = __get_cpu_var(smt_snooze_delay);
 
 	/*
 	 * Indicate to the HV that we are idle. Now would be
@@ -517,13 +518,12 @@ static void pseries_dedicated_idle_sleep(void)
 	 * has been checked recently.  If we should poll for a little
 	 * while, do so.
 	 */
-	if (__get_cpu_var(smt_snooze_delay)) {
-		start_snooze = get_tb() +
-			__get_cpu_var(smt_snooze_delay) * tb_ticks_per_usec;
+	if (snooze) {
+		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
 		local_irq_enable();
 		set_thread_flag(TIF_POLLING_NRFLAG);
 
-		while (get_tb() < start_snooze) {
+		while ((snooze < 0) || (get_tb() < start_snooze)) {
 			if (need_resched() || cpu_is_offline(cpu))
 				goto out;
 			ppc64_runlatch_off();
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 4e7f89a84561..3b1bf61c45be 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -55,7 +55,29 @@
 * The Primary thread of each non-boot processor was started from the OF client
 * interface by prom_hold_cpus and is spinning on secondary_hold_spinloop.
 */
-static cpumask_t of_spin_map;
+static cpumask_var_t of_spin_mask;
+
+/* Query where a cpu is now.  Return codes #defined in plpar_wrappers.h */
+int smp_query_cpu_stopped(unsigned int pcpu)
+{
+	int cpu_status, status;
+	int qcss_tok = rtas_token("query-cpu-stopped-state");
+
+	if (qcss_tok == RTAS_UNKNOWN_SERVICE) {
+		printk(KERN_INFO "Firmware doesn't support "
+				"query-cpu-stopped-state\n");
+		return QCSS_HARDWARE_ERROR;
+	}
+
+	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
+	if (status != 0) {
+		printk(KERN_ERR
+		       "RTAS query-cpu-stopped-state failed: %i\n", status);
+		return status;
+	}
+
+	return cpu_status;
+}
 
 /**
  * smp_startup_cpu() - start the given cpu
@@ -76,12 +98,18 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu)
 	unsigned int pcpu;
 	int start_cpu;
 
-	if (cpu_isset(lcpu, of_spin_map))
+	if (cpumask_test_cpu(lcpu, of_spin_mask))
 		/* Already started by OF and sitting in spin loop */
 		return 1;
 
 	pcpu = get_hard_smp_processor_id(lcpu);
 
+	/* Check to see if the CPU out of FW already for kexec */
+	if (smp_query_cpu_stopped(pcpu) == QCSS_NOT_STOPPED){
+		cpumask_set_cpu(lcpu, of_spin_mask);
+		return 1;
+	}
+
 	/* Fixup atomic count: it exited inside IRQ handler. */
 	task_thread_info(paca[lcpu].__current)->preempt_count = 0;
 
@@ -115,7 +143,7 @@ static void __devinit smp_xics_setup_cpu(int cpu)
 	if (firmware_has_feature(FW_FEATURE_SPLPAR))
 		vpa_init(cpu);
 
-	cpu_clear(cpu, of_spin_map);
+	cpumask_clear_cpu(cpu, of_spin_mask);
 	set_cpu_current_state(cpu, CPU_STATE_ONLINE);
 	set_default_offline_state(cpu);
 
@@ -186,17 +214,19 @@ static void __init smp_init_pseries(void)
 
 	pr_debug(" -> smp_init_pSeries()\n");
 
+	alloc_bootmem_cpumask_var(&of_spin_mask);
+
 	/* Mark threads which are still spinning in hold loops. */
 	if (cpu_has_feature(CPU_FTR_SMT)) {
 		for_each_present_cpu(i) {
 			if (cpu_thread_in_core(i) == 0)
-				cpu_set(i, of_spin_map);
+				cpumask_set_cpu(i, of_spin_mask);
 		}
 	} else {
-		of_spin_map = cpu_present_map;
+		cpumask_copy(of_spin_mask, cpu_present_mask);
 	}
 
-	cpu_clear(boot_cpuid, of_spin_map);
+	cpumask_clear_cpu(boot_cpuid, of_spin_mask);
 
 	/* Non-lpar has additional take/give timebase */
 	if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index 1bcedd8b4616..f19d19468393 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -163,29 +163,37 @@ static inline void lpar_qirr_info(int n_cpu , u8 value)
 /* Interface to generic irq subsystem */
 
 #ifdef CONFIG_SMP
-static int get_irq_server(unsigned int virq, cpumask_t cpumask,
+/*
+ * For the moment we only implement delivery to all cpus or one cpu.
+ *
+ * If the requested affinity is cpu_all_mask, we set global affinity.
+ * If not we set it to the first cpu in the mask, even if multiple cpus
+ * are set. This is so things like irqbalance (which set core and package
+ * wide affinities) do the right thing.
+ */
+static int get_irq_server(unsigned int virq, const struct cpumask *cpumask,
 			  unsigned int strict_check)
 {
-	int server;
-	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t tmp = CPU_MASK_NONE;
 
 	if (!distribute_irqs)
 		return default_server;
 
-	if (!cpus_equal(cpumask, CPU_MASK_ALL)) {
-		cpus_and(tmp, cpu_online_map, cpumask);
-
-		server = first_cpu(tmp);
+	if (!cpumask_equal(cpumask, cpu_all_mask)) {
+		int server = cpumask_first_and(cpu_online_mask, cpumask);
 
-		if (server < NR_CPUS)
+		if (server < nr_cpu_ids)
 			return get_hard_smp_processor_id(server);
 
 		if (strict_check)
 			return -1;
 	}
 
-	if (cpus_equal(cpu_online_map, cpu_present_map))
+	/*
+	 * Workaround issue with some versions of JS20 firmware that
+	 * deliver interrupts to cpus which haven't been started. This
+	 * happens when using the maxcpus= boot option.
+	 */
+	if (cpumask_equal(cpu_online_mask, cpu_present_mask))
 		return default_distrib_server;
 
 	return default_server;
@@ -207,7 +215,7 @@ static void xics_unmask_irq(unsigned int virq)
 	if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
 		return;
 
-	server = get_irq_server(virq, *(irq_to_desc(virq)->affinity), 0);
+	server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0);
 
 	call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server,
 				DEFAULT_PRIORITY);
@@ -398,11 +406,7 @@ static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
 		return -1;
 	}
 
-	/*
-	 * For the moment only implement delivery to all cpus or one cpu.
-	 * Get current irq_server for the given irq
-	 */
-	irq_server = get_irq_server(virq, *cpumask, 1);
+	irq_server = get_irq_server(virq, cpumask, 1);
 	if (irq_server == -1) {
 		char cpulist[128];
 		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
@@ -611,7 +615,7 @@ int __init smp_xics_probe(void)
 {
 	xics_request_ipi();
 
-	return cpus_weight(cpu_possible_map);
+	return cpumask_weight(cpu_possible_mask);
 }
 
 #endif /* CONFIG_SMP */