aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/platforms/pseries
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2013-09-06 13:49:42 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2013-09-06 13:49:42 -0400
commit39eda2aba6be642b71f2e0ad623dcb09fd9d79cf (patch)
treecd0c8f547847641af73e38aab2478f3119dee490 /arch/powerpc/platforms/pseries
parent2e515bf096c245ba87f20ab4b4ea20f911afaeda (diff)
parent9f24b0c9ef9b6b1292579c9e2cd7ff07ddc372b7 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull powerpc updates from Ben Herrenschmidt: "Here's the powerpc batch for this merge window. Some of the highlights are: - A bunch of endian fixes ! We don't have full LE support yet in that release but this contains a lot of fixes all over arch/powerpc to use the proper accessors, call the firmware with the right endian mode, etc... - A few updates to our "powernv" platform (non-virtualized, the one to run KVM on), among others, support for bridging the P8 LPC bus for UARTs, support and some EEH fixes. - Some mpc51xx clock API cleanups in preparation for a clock API overhaul - A pile of cleanups of our old math emulation code, including better support for using it to emulate optional FP instructions on embedded chips that otherwise have a HW FPU. - Some infrastructure in selftest, for powerpc now, but could be generalized, initially used by some tests for our perf instruction counting code. - A pile of fixes for hotplug on pseries (that was seriously bitrotting) - The usual slew of freescale embedded updates, new boards, 64-bit hibernation support, e6500 core PMU support, etc..." 
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (146 commits) powerpc: Correct FSCR bit definitions powerpc/xmon: Fix printing of set of CPUs in xmon powerpc/pseries: Move lparcfg.c to platforms/pseries powerpc/powernv: Return secondary CPUs to firmware on kexec powerpc/btext: Fix CONFIG_PPC_EARLY_DEBUG_BOOTX on ppc32 powerpc: Cleanup handling of the DSCR bit in the FSCR register powerpc/pseries: Child nodes are not detached by dlpar_detach_node powerpc/pseries: Add mising of_node_put in delete_dt_node powerpc/pseries: Make dlpar_configure_connector parent node aware powerpc/pseries: Do all node initialization in dlpar_parse_cc_node powerpc/pseries: Fix parsing of initial node path in update_dt_node powerpc/pseries: Pack update_props_workarea to map correctly to rtas buffer header powerpc/pseries: Fix over writing of rtas return code in update_dt_node powerpc/pseries: Fix creation of loop in device node property list powerpc: Skip emulating & leave interrupts off for kernel program checks powerpc: Add more exception trampolines for hypervisor exceptions powerpc: Fix location and rename exception trampolines powerpc: Add more trap names to xmon powerpc/pseries: Add a warning in the case of cross-cpu VPA registration powerpc: Update the 00-Index in Documentation/powerpc ...
Diffstat (limited to 'arch/powerpc/platforms/pseries')
-rw-r--r--arch/powerpc/platforms/pseries/Makefile1
-rw-r--r--arch/powerpc/platforms/pseries/cmm.c3
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c67
-rw-r--r--arch/powerpc/platforms/pseries/dtl.c5
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c7
-rw-r--r--arch/powerpc/platforms/pseries/hvconsole.c19
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c11
-rw-r--r--arch/powerpc/platforms/pseries/kexec.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c12
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c710
-rw-r--r--arch/powerpc/platforms/pseries/mobility.c45
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h324
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c12
-rw-r--r--arch/powerpc/platforms/pseries/pseries.h5
-rw-r--r--arch/powerpc/platforms/pseries/pseries_energy.c4
-rw-r--r--arch/powerpc/platforms/pseries/setup.c6
-rw-r--r--arch/powerpc/platforms/pseries/smp.c20
17 files changed, 822 insertions, 431 deletions
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 8ae010381316..6c61ec5ee914 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_CMM) += cmm.o
22obj-$(CONFIG_DTL) += dtl.o 22obj-$(CONFIG_DTL) += dtl.o
23obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o 23obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
24obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o 24obj-$(CONFIG_PSERIES_IDLE) += processor_idle.o
25obj-$(CONFIG_LPARCFG) += lparcfg.o
25 26
26ifeq ($(CONFIG_PPC_PSERIES),y) 27ifeq ($(CONFIG_PPC_PSERIES),y)
27obj-$(CONFIG_SUSPEND) += suspend.o 28obj-$(CONFIG_SUSPEND) += suspend.o
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index c638535753df..1e561bef459b 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -40,8 +40,7 @@
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
41#include <asm/uaccess.h> 41#include <asm/uaccess.h>
42#include <linux/memory.h> 42#include <linux/memory.h>
43 43#include <asm/plpar_wrappers.h>
44#include "plpar_wrappers.h"
45 44
46#define CMM_DRIVER_VERSION "1.0.0" 45#define CMM_DRIVER_VERSION "1.0.0"
47#define CMM_DEFAULT_DELAY 1 46#define CMM_DEFAULT_DELAY 1
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index a1a7b9a67ffd..7cfdaae1721a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -63,26 +63,32 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
63 return prop; 63 return prop;
64} 64}
65 65
66static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) 66static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
67 const char *path)
67{ 68{
68 struct device_node *dn; 69 struct device_node *dn;
69 char *name; 70 char *name;
70 71
72 /* If parent node path is "/" advance path to NULL terminator to
73 * prevent double leading slashs in full_name.
74 */
75 if (!path[1])
76 path++;
77
71 dn = kzalloc(sizeof(*dn), GFP_KERNEL); 78 dn = kzalloc(sizeof(*dn), GFP_KERNEL);
72 if (!dn) 79 if (!dn)
73 return NULL; 80 return NULL;
74 81
75 /* The configure connector reported name does not contain a
76 * preceding '/', so we allocate a buffer large enough to
77 * prepend this to the full_name.
78 */
79 name = (char *)ccwa + ccwa->name_offset; 82 name = (char *)ccwa + ccwa->name_offset;
80 dn->full_name = kasprintf(GFP_KERNEL, "/%s", name); 83 dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
81 if (!dn->full_name) { 84 if (!dn->full_name) {
82 kfree(dn); 85 kfree(dn);
83 return NULL; 86 return NULL;
84 } 87 }
85 88
89 of_node_set_flag(dn, OF_DYNAMIC);
90 kref_init(&dn->kref);
91
86 return dn; 92 return dn;
87} 93}
88 94
@@ -120,7 +126,8 @@ void dlpar_free_cc_nodes(struct device_node *dn)
120#define CALL_AGAIN -2 126#define CALL_AGAIN -2
121#define ERR_CFG_USE -9003 127#define ERR_CFG_USE -9003
122 128
123struct device_node *dlpar_configure_connector(u32 drc_index) 129struct device_node *dlpar_configure_connector(u32 drc_index,
130 struct device_node *parent)
124{ 131{
125 struct device_node *dn; 132 struct device_node *dn;
126 struct device_node *first_dn = NULL; 133 struct device_node *first_dn = NULL;
@@ -129,6 +136,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
129 struct property *last_property = NULL; 136 struct property *last_property = NULL;
130 struct cc_workarea *ccwa; 137 struct cc_workarea *ccwa;
131 char *data_buf; 138 char *data_buf;
139 const char *parent_path = parent->full_name;
132 int cc_token; 140 int cc_token;
133 int rc = -1; 141 int rc = -1;
134 142
@@ -162,7 +170,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
162 break; 170 break;
163 171
164 case NEXT_SIBLING: 172 case NEXT_SIBLING:
165 dn = dlpar_parse_cc_node(ccwa); 173 dn = dlpar_parse_cc_node(ccwa, parent_path);
166 if (!dn) 174 if (!dn)
167 goto cc_error; 175 goto cc_error;
168 176
@@ -172,13 +180,17 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
172 break; 180 break;
173 181
174 case NEXT_CHILD: 182 case NEXT_CHILD:
175 dn = dlpar_parse_cc_node(ccwa); 183 if (first_dn)
184 parent_path = last_dn->full_name;
185
186 dn = dlpar_parse_cc_node(ccwa, parent_path);
176 if (!dn) 187 if (!dn)
177 goto cc_error; 188 goto cc_error;
178 189
179 if (!first_dn) 190 if (!first_dn) {
191 dn->parent = parent;
180 first_dn = dn; 192 first_dn = dn;
181 else { 193 } else {
182 dn->parent = last_dn; 194 dn->parent = last_dn;
183 if (last_dn) 195 if (last_dn)
184 last_dn->child = dn; 196 last_dn->child = dn;
@@ -202,6 +214,7 @@ struct device_node *dlpar_configure_connector(u32 drc_index)
202 214
203 case PREV_PARENT: 215 case PREV_PARENT:
204 last_dn = last_dn->parent; 216 last_dn = last_dn->parent;
217 parent_path = last_dn->parent->full_name;
205 break; 218 break;
206 219
207 case CALL_AGAIN: 220 case CALL_AGAIN:
@@ -256,8 +269,6 @@ int dlpar_attach_node(struct device_node *dn)
256{ 269{
257 int rc; 270 int rc;
258 271
259 of_node_set_flag(dn, OF_DYNAMIC);
260 kref_init(&dn->kref);
261 dn->parent = derive_parent(dn->full_name); 272 dn->parent = derive_parent(dn->full_name);
262 if (!dn->parent) 273 if (!dn->parent)
263 return -ENOMEM; 274 return -ENOMEM;
@@ -275,8 +286,15 @@ int dlpar_attach_node(struct device_node *dn)
275 286
276int dlpar_detach_node(struct device_node *dn) 287int dlpar_detach_node(struct device_node *dn)
277{ 288{
289 struct device_node *child;
278 int rc; 290 int rc;
279 291
292 child = of_get_next_child(dn, NULL);
293 while (child) {
294 dlpar_detach_node(child);
295 child = of_get_next_child(dn, child);
296 }
297
280 rc = of_detach_node(dn); 298 rc = of_detach_node(dn);
281 if (rc) 299 if (rc)
282 return rc; 300 return rc;
@@ -382,9 +400,8 @@ out:
382 400
383static ssize_t dlpar_cpu_probe(const char *buf, size_t count) 401static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
384{ 402{
385 struct device_node *dn; 403 struct device_node *dn, *parent;
386 unsigned long drc_index; 404 unsigned long drc_index;
387 char *cpu_name;
388 int rc; 405 int rc;
389 406
390 cpu_hotplug_driver_lock(); 407 cpu_hotplug_driver_lock();
@@ -394,25 +411,19 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
394 goto out; 411 goto out;
395 } 412 }
396 413
397 dn = dlpar_configure_connector(drc_index); 414 parent = of_find_node_by_path("/cpus");
398 if (!dn) { 415 if (!parent) {
399 rc = -EINVAL; 416 rc = -ENODEV;
400 goto out; 417 goto out;
401 } 418 }
402 419
403 /* configure-connector reports cpus as living in the base 420 dn = dlpar_configure_connector(drc_index, parent);
404 * directory of the device tree. CPUs actually live in the 421 if (!dn) {
405 * cpus directory so we need to fixup the full_name. 422 rc = -EINVAL;
406 */
407 cpu_name = kasprintf(GFP_KERNEL, "/cpus%s", dn->full_name);
408 if (!cpu_name) {
409 dlpar_free_cc_nodes(dn);
410 rc = -ENOMEM;
411 goto out; 423 goto out;
412 } 424 }
413 425
414 kfree(dn->full_name); 426 of_node_put(parent);
415 dn->full_name = cpu_name;
416 427
417 rc = dlpar_acquire_drc(drc_index); 428 rc = dlpar_acquire_drc(drc_index);
418 if (rc) { 429 if (rc) {
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 0cc0ac07a55d..5db66f1fbc26 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -29,8 +29,7 @@
29#include <asm/firmware.h> 29#include <asm/firmware.h>
30#include <asm/lppaca.h> 30#include <asm/lppaca.h>
31#include <asm/debug.h> 31#include <asm/debug.h>
32 32#include <asm/plpar_wrappers.h>
33#include "plpar_wrappers.h"
34 33
35struct dtl { 34struct dtl {
36 struct dtl_entry *buf; 35 struct dtl_entry *buf;
@@ -87,7 +86,7 @@ static void consume_dtle(struct dtl_entry *dtle, u64 index)
87 barrier(); 86 barrier();
88 87
89 /* check for hypervisor ring buffer overflow, ignore this entry if so */ 88 /* check for hypervisor ring buffer overflow, ignore this entry if so */
90 if (index + N_DISPATCH_LOG < vpa->dtl_idx) 89 if (index + N_DISPATCH_LOG < be64_to_cpu(vpa->dtl_idx))
91 return; 90 return;
92 91
93 ++wp; 92 ++wp;
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b20..82789e79e539 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -30,7 +30,8 @@
30#include <asm/machdep.h> 30#include <asm/machdep.h>
31#include <asm/vdso_datapage.h> 31#include <asm/vdso_datapage.h>
32#include <asm/xics.h> 32#include <asm/xics.h>
33#include "plpar_wrappers.h" 33#include <asm/plpar_wrappers.h>
34
34#include "offline_states.h" 35#include "offline_states.h"
35 36
36/* This version can't take the spinlock, because it never returns */ 37/* This version can't take the spinlock, because it never returns */
@@ -123,7 +124,7 @@ static void pseries_mach_cpu_die(void)
123 cede_latency_hint = 2; 124 cede_latency_hint = 2;
124 125
125 get_lppaca()->idle = 1; 126 get_lppaca()->idle = 1;
126 if (!get_lppaca()->shared_proc) 127 if (!lppaca_shared_proc(get_lppaca()))
127 get_lppaca()->donate_dedicated_cpu = 1; 128 get_lppaca()->donate_dedicated_cpu = 1;
128 129
129 while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) { 130 while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
@@ -137,7 +138,7 @@ static void pseries_mach_cpu_die(void)
137 138
138 local_irq_disable(); 139 local_irq_disable();
139 140
140 if (!get_lppaca()->shared_proc) 141 if (!lppaca_shared_proc(get_lppaca()))
141 get_lppaca()->donate_dedicated_cpu = 0; 142 get_lppaca()->donate_dedicated_cpu = 0;
142 get_lppaca()->idle = 0; 143 get_lppaca()->idle = 0;
143 144
diff --git a/arch/powerpc/platforms/pseries/hvconsole.c b/arch/powerpc/platforms/pseries/hvconsole.c
index b344f94b0400..849b29b3e9ae 100644
--- a/arch/powerpc/platforms/pseries/hvconsole.c
+++ b/arch/powerpc/platforms/pseries/hvconsole.c
@@ -28,7 +28,7 @@
28#include <linux/errno.h> 28#include <linux/errno.h>
29#include <asm/hvcall.h> 29#include <asm/hvcall.h>
30#include <asm/hvconsole.h> 30#include <asm/hvconsole.h>
31#include "plpar_wrappers.h" 31#include <asm/plpar_wrappers.h>
32 32
33/** 33/**
34 * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper 34 * hvc_get_chars - retrieve characters from firmware for denoted vterm adatper
@@ -40,10 +40,16 @@
40 */ 40 */
41int hvc_get_chars(uint32_t vtermno, char *buf, int count) 41int hvc_get_chars(uint32_t vtermno, char *buf, int count)
42{ 42{
43 unsigned long got; 43 long ret;
44 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
45 unsigned long *lbuf = (unsigned long *)buf;
46
47 ret = plpar_hcall(H_GET_TERM_CHAR, retbuf, vtermno);
48 lbuf[0] = be64_to_cpu(retbuf[1]);
49 lbuf[1] = be64_to_cpu(retbuf[2]);
44 50
45 if (plpar_get_term_char(vtermno, &got, buf) == H_SUCCESS) 51 if (ret == H_SUCCESS)
46 return got; 52 return retbuf[0];
47 53
48 return 0; 54 return 0;
49} 55}
@@ -69,8 +75,9 @@ int hvc_put_chars(uint32_t vtermno, const char *buf, int count)
69 if (count > MAX_VIO_PUT_CHARS) 75 if (count > MAX_VIO_PUT_CHARS)
70 count = MAX_VIO_PUT_CHARS; 76 count = MAX_VIO_PUT_CHARS;
71 77
72 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count, lbuf[0], 78 ret = plpar_hcall_norets(H_PUT_TERM_CHAR, vtermno, count,
73 lbuf[1]); 79 cpu_to_be64(lbuf[0]),
80 cpu_to_be64(lbuf[1]));
74 if (ret == H_SUCCESS) 81 if (ret == H_SUCCESS)
75 return count; 82 return count;
76 if (ret == H_BUSY) 83 if (ret == H_BUSY)
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 23fc1dcf4434..0307901e4132 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -48,8 +48,7 @@
48#include <asm/ppc-pci.h> 48#include <asm/ppc-pci.h>
49#include <asm/udbg.h> 49#include <asm/udbg.h>
50#include <asm/mmzone.h> 50#include <asm/mmzone.h>
51 51#include <asm/plpar_wrappers.h>
52#include "plpar_wrappers.h"
53 52
54 53
55static void tce_invalidate_pSeries_sw(struct iommu_table *tbl, 54static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
@@ -530,7 +529,7 @@ static void iommu_table_setparms(struct pci_controller *phb,
530static void iommu_table_setparms_lpar(struct pci_controller *phb, 529static void iommu_table_setparms_lpar(struct pci_controller *phb,
531 struct device_node *dn, 530 struct device_node *dn,
532 struct iommu_table *tbl, 531 struct iommu_table *tbl,
533 const void *dma_window) 532 const __be32 *dma_window)
534{ 533{
535 unsigned long offset, size; 534 unsigned long offset, size;
536 535
@@ -630,7 +629,7 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
630 struct iommu_table *tbl; 629 struct iommu_table *tbl;
631 struct device_node *dn, *pdn; 630 struct device_node *dn, *pdn;
632 struct pci_dn *ppci; 631 struct pci_dn *ppci;
633 const void *dma_window = NULL; 632 const __be32 *dma_window = NULL;
634 633
635 dn = pci_bus_to_OF_node(bus); 634 dn = pci_bus_to_OF_node(bus);
636 635
@@ -1152,7 +1151,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
1152{ 1151{
1153 struct device_node *pdn, *dn; 1152 struct device_node *pdn, *dn;
1154 struct iommu_table *tbl; 1153 struct iommu_table *tbl;
1155 const void *dma_window = NULL; 1154 const __be32 *dma_window = NULL;
1156 struct pci_dn *pci; 1155 struct pci_dn *pci;
1157 1156
1158 pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); 1157 pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
@@ -1201,7 +1200,7 @@ static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask)
1201 bool ddw_enabled = false; 1200 bool ddw_enabled = false;
1202 struct device_node *pdn, *dn; 1201 struct device_node *pdn, *dn;
1203 struct pci_dev *pdev; 1202 struct pci_dev *pdev;
1204 const void *dma_window = NULL; 1203 const __be32 *dma_window = NULL;
1205 u64 dma_offset; 1204 u64 dma_offset;
1206 1205
1207 if (!dev->dma_mask) 1206 if (!dev->dma_mask)
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c
index 7d94bdc63d50..13fa95b3aa8b 100644
--- a/arch/powerpc/platforms/pseries/kexec.c
+++ b/arch/powerpc/platforms/pseries/kexec.c
@@ -17,9 +17,9 @@
17#include <asm/mpic.h> 17#include <asm/mpic.h>
18#include <asm/xics.h> 18#include <asm/xics.h>
19#include <asm/smp.h> 19#include <asm/smp.h>
20#include <asm/plpar_wrappers.h>
20 21
21#include "pseries.h" 22#include "pseries.h"
22#include "plpar_wrappers.h"
23 23
24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) 24static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
25{ 25{
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8bad880bd177..356bc75ca74f 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,8 +41,8 @@
41#include <asm/smp.h> 41#include <asm/smp.h>
42#include <asm/trace.h> 42#include <asm/trace.h>
43#include <asm/firmware.h> 43#include <asm/firmware.h>
44#include <asm/plpar_wrappers.h>
44 45
45#include "plpar_wrappers.h"
46#include "pseries.h" 46#include "pseries.h"
47 47
48/* Flag bits for H_BULK_REMOVE */ 48/* Flag bits for H_BULK_REMOVE */
@@ -68,6 +68,12 @@ void vpa_init(int cpu)
68 struct paca_struct *pp; 68 struct paca_struct *pp;
69 struct dtl_entry *dtl; 69 struct dtl_entry *dtl;
70 70
71 /*
72 * The spec says it "may be problematic" if CPU x registers the VPA of
73 * CPU y. We should never do that, but wail if we ever do.
74 */
75 WARN_ON(cpu != smp_processor_id());
76
71 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 77 if (cpu_has_feature(CPU_FTR_ALTIVEC))
72 lppaca_of(cpu).vmxregs_in_use = 1; 78 lppaca_of(cpu).vmxregs_in_use = 1;
73 79
@@ -106,7 +112,7 @@ void vpa_init(int cpu)
106 lppaca_of(cpu).dtl_idx = 0; 112 lppaca_of(cpu).dtl_idx = 0;
107 113
108 /* hypervisor reads buffer length from this field */ 114 /* hypervisor reads buffer length from this field */
109 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 115 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
110 ret = register_dtl(hwcpu, __pa(dtl)); 116 ret = register_dtl(hwcpu, __pa(dtl));
111 if (ret) 117 if (ret)
112 pr_err("WARNING: DTL registration of cpu %d (hw %d) " 118 pr_err("WARNING: DTL registration of cpu %d (hw %d) "
@@ -724,7 +730,7 @@ int h_get_mpp(struct hvcall_mpp_data *mpp_data)
724 730
725 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; 731 mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff;
726 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; 732 mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff;
727 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; 733 mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL;
728 734
729 mpp_data->pool_size = retbuf[4]; 735 mpp_data->pool_size = retbuf[4];
730 mpp_data->loan_request = retbuf[5]; 736 mpp_data->loan_request = retbuf[5];
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
new file mode 100644
index 000000000000..e738007eae64
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -0,0 +1,710 @@
1/*
2 * PowerPC64 LPAR Configuration Information Driver
3 *
4 * Dave Engebretsen engebret@us.ibm.com
5 * Copyright (c) 2003 Dave Engebretsen
6 * Will Schmidt willschm@us.ibm.com
7 * SPLPAR updates, Copyright (c) 2003 Will Schmidt IBM Corporation.
8 * seq_file updates, Copyright (c) 2004 Will Schmidt IBM Corporation.
9 * Nathan Lynch nathanl@austin.ibm.com
10 * Added lparcfg_write, Copyright (C) 2004 Nathan Lynch IBM Corporation.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 *
17 * This driver creates a proc file at /proc/ppc64/lparcfg which contains
18 * keyword - value pairs that specify the configuration of the partition.
19 */
20
21#include <linux/module.h>
22#include <linux/types.h>
23#include <linux/errno.h>
24#include <linux/proc_fs.h>
25#include <linux/init.h>
26#include <linux/seq_file.h>
27#include <linux/slab.h>
28#include <asm/uaccess.h>
29#include <asm/lppaca.h>
30#include <asm/hvcall.h>
31#include <asm/firmware.h>
32#include <asm/rtas.h>
33#include <asm/time.h>
34#include <asm/prom.h>
35#include <asm/vdso_datapage.h>
36#include <asm/vio.h>
37#include <asm/mmu.h>
38#include <asm/machdep.h>
39
40
41/*
42 * This isn't a module but we expose that to userspace
43 * via /proc so leave the definitions here
44 */
45#define MODULE_VERS "1.9"
46#define MODULE_NAME "lparcfg"
47
48/* #define LPARCFG_DEBUG */
49
50/*
51 * Track sum of all purrs across all processors. This is used to further
52 * calculate usage values by different applications
53 */
54static unsigned long get_purr(void)
55{
56 unsigned long sum_purr = 0;
57 int cpu;
58
59 for_each_possible_cpu(cpu) {
60 struct cpu_usage *cu;
61
62 cu = &per_cpu(cpu_usage_array, cpu);
63 sum_purr += cu->current_tb;
64 }
65 return sum_purr;
66}
67
68/*
69 * Methods used to fetch LPAR data when running on a pSeries platform.
70 */
71
72struct hvcall_ppp_data {
73 u64 entitlement;
74 u64 unallocated_entitlement;
75 u16 group_num;
76 u16 pool_num;
77 u8 capped;
78 u8 weight;
79 u8 unallocated_weight;
80 u16 active_procs_in_pool;
81 u16 active_system_procs;
82 u16 phys_platform_procs;
83 u32 max_proc_cap_avail;
84 u32 entitled_proc_cap_avail;
85};
86
87/*
88 * H_GET_PPP hcall returns info in 4 parms.
89 * entitled_capacity,unallocated_capacity,
90 * aggregation, resource_capability).
91 *
92 * R4 = Entitled Processor Capacity Percentage.
93 * R5 = Unallocated Processor Capacity Percentage.
94 * R6 (AABBCCDDEEFFGGHH).
95 * XXXX - reserved (0)
96 * XXXX - reserved (0)
97 * XXXX - Group Number
98 * XXXX - Pool Number.
99 * R7 (IIJJKKLLMMNNOOPP).
100 * XX - reserved. (0)
101 * XX - bit 0-6 reserved (0). bit 7 is Capped indicator.
102 * XX - variable processor Capacity Weight
103 * XX - Unallocated Variable Processor Capacity Weight.
104 * XXXX - Active processors in Physical Processor Pool.
105 * XXXX - Processors active on platform.
106 * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
107 * XXXX - Physical platform procs allocated to virtualization.
108 * XXXXXX - Max procs capacity % available to the partitions pool.
109 * XXXXXX - Entitled procs capacity % available to the
110 * partitions pool.
111 */
112static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
113{
114 unsigned long rc;
115 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
116
117 rc = plpar_hcall9(H_GET_PPP, retbuf);
118
119 ppp_data->entitlement = retbuf[0];
120 ppp_data->unallocated_entitlement = retbuf[1];
121
122 ppp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff;
123 ppp_data->pool_num = retbuf[2] & 0xffff;
124
125 ppp_data->capped = (retbuf[3] >> 6 * 8) & 0x01;
126 ppp_data->weight = (retbuf[3] >> 5 * 8) & 0xff;
127 ppp_data->unallocated_weight = (retbuf[3] >> 4 * 8) & 0xff;
128 ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
129 ppp_data->active_system_procs = retbuf[3] & 0xffff;
130
131 ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
132 ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
133 ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
134
135 return rc;
136}
137
138static unsigned h_pic(unsigned long *pool_idle_time,
139 unsigned long *num_procs)
140{
141 unsigned long rc;
142 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
143
144 rc = plpar_hcall(H_PIC, retbuf);
145
146 *pool_idle_time = retbuf[0];
147 *num_procs = retbuf[1];
148
149 return rc;
150}
151
152/*
153 * parse_ppp_data
154 * Parse out the data returned from h_get_ppp and h_pic
155 */
156static void parse_ppp_data(struct seq_file *m)
157{
158 struct hvcall_ppp_data ppp_data;
159 struct device_node *root;
160 const int *perf_level;
161 int rc;
162
163 rc = h_get_ppp(&ppp_data);
164 if (rc)
165 return;
166
167 seq_printf(m, "partition_entitled_capacity=%lld\n",
168 ppp_data.entitlement);
169 seq_printf(m, "group=%d\n", ppp_data.group_num);
170 seq_printf(m, "system_active_processors=%d\n",
171 ppp_data.active_system_procs);
172
173 /* pool related entries are appropriate for shared configs */
174 if (lppaca_shared_proc(get_lppaca())) {
175 unsigned long pool_idle_time, pool_procs;
176
177 seq_printf(m, "pool=%d\n", ppp_data.pool_num);
178
179 /* report pool_capacity in percentage */
180 seq_printf(m, "pool_capacity=%d\n",
181 ppp_data.active_procs_in_pool * 100);
182
183 h_pic(&pool_idle_time, &pool_procs);
184 seq_printf(m, "pool_idle_time=%ld\n", pool_idle_time);
185 seq_printf(m, "pool_num_procs=%ld\n", pool_procs);
186 }
187
188 seq_printf(m, "unallocated_capacity_weight=%d\n",
189 ppp_data.unallocated_weight);
190 seq_printf(m, "capacity_weight=%d\n", ppp_data.weight);
191 seq_printf(m, "capped=%d\n", ppp_data.capped);
192 seq_printf(m, "unallocated_capacity=%lld\n",
193 ppp_data.unallocated_entitlement);
194
195 /* The last bits of information returned from h_get_ppp are only
196 * valid if the ibm,partition-performance-parameters-level
197 * property is >= 1.
198 */
199 root = of_find_node_by_path("/");
200 if (root) {
201 perf_level = of_get_property(root,
202 "ibm,partition-performance-parameters-level",
203 NULL);
204 if (perf_level && (*perf_level >= 1)) {
205 seq_printf(m,
206 "physical_procs_allocated_to_virtualization=%d\n",
207 ppp_data.phys_platform_procs);
208 seq_printf(m, "max_proc_capacity_available=%d\n",
209 ppp_data.max_proc_cap_avail);
210 seq_printf(m, "entitled_proc_capacity_available=%d\n",
211 ppp_data.entitled_proc_cap_avail);
212 }
213
214 of_node_put(root);
215 }
216}
217
218/**
219 * parse_mpp_data
220 * Parse out data returned from h_get_mpp
221 */
222static void parse_mpp_data(struct seq_file *m)
223{
224 struct hvcall_mpp_data mpp_data;
225 int rc;
226
227 rc = h_get_mpp(&mpp_data);
228 if (rc)
229 return;
230
231 seq_printf(m, "entitled_memory=%ld\n", mpp_data.entitled_mem);
232
233 if (mpp_data.mapped_mem != -1)
234 seq_printf(m, "mapped_entitled_memory=%ld\n",
235 mpp_data.mapped_mem);
236
237 seq_printf(m, "entitled_memory_group_number=%d\n", mpp_data.group_num);
238 seq_printf(m, "entitled_memory_pool_number=%d\n", mpp_data.pool_num);
239
240 seq_printf(m, "entitled_memory_weight=%d\n", mpp_data.mem_weight);
241 seq_printf(m, "unallocated_entitled_memory_weight=%d\n",
242 mpp_data.unallocated_mem_weight);
243 seq_printf(m, "unallocated_io_mapping_entitlement=%ld\n",
244 mpp_data.unallocated_entitlement);
245
246 if (mpp_data.pool_size != -1)
247 seq_printf(m, "entitled_memory_pool_size=%ld bytes\n",
248 mpp_data.pool_size);
249
250 seq_printf(m, "entitled_memory_loan_request=%ld\n",
251 mpp_data.loan_request);
252
253 seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem);
254}
255
256/**
257 * parse_mpp_x_data
258 * Parse out data returned from h_get_mpp_x
259 */
260static void parse_mpp_x_data(struct seq_file *m)
261{
262 struct hvcall_mpp_x_data mpp_x_data;
263
264 if (!firmware_has_feature(FW_FEATURE_XCMO))
265 return;
266 if (h_get_mpp_x(&mpp_x_data))
267 return;
268
269 seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes);
270
271 if (mpp_x_data.pool_coalesced_bytes)
272 seq_printf(m, "pool_coalesced_bytes=%ld\n",
273 mpp_x_data.pool_coalesced_bytes);
274 if (mpp_x_data.pool_purr_cycles)
275 seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles);
276 if (mpp_x_data.pool_spurr_cycles)
277 seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles);
278}
279
280#define SPLPAR_CHARACTERISTICS_TOKEN 20
281#define SPLPAR_MAXLENGTH 1026*(sizeof(char))
282
283/*
284 * parse_system_parameter_string()
285 * Retrieve the potential_processors, max_entitled_capacity and friends
286 * through the get-system-parameter rtas call. Replace keyword strings as
287 * necessary.
288 */
/*
 * Fetch the SPLPAR characteristics string via the
 * ibm,get-system-parameter RTAS call and emit it to the seq_file as
 * one "name=value" line per comma-separated term, renaming a couple
 * of firmware keywords to their /proc/powerpc/lparcfg names.
 *
 * On kmalloc or RTAS failure the function logs and returns without
 * emitting anything.
 */
static void parse_system_parameter_string(struct seq_file *m)
{
	int call_status;

	unsigned char *local_buffer = kmalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
	if (!local_buffer) {
		printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
		       __FILE__, __func__, __LINE__);
		return;
	}

	/*
	 * rtas_data_buf is a shared buffer; copy the result out under
	 * the lock so we can parse it at leisure afterwards.
	 */
	spin_lock(&rtas_data_buf_lock);
	memset(rtas_data_buf, 0, SPLPAR_MAXLENGTH);
	call_status = rtas_call(rtas_token("ibm,get-system-parameter"), 3, 1,
				NULL,
				SPLPAR_CHARACTERISTICS_TOKEN,
				__pa(rtas_data_buf),
				RTAS_DATA_BUF_SIZE);
	memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
	/* Force termination in case firmware returned a full buffer. */
	local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
	spin_unlock(&rtas_data_buf_lock);

	if (call_status != 0) {
		printk(KERN_INFO
		       "%s %s Error calling get-system-parameter (0x%x)\n",
		       __FILE__, __func__, call_status);
	} else {
		int splpar_strlen;
		int idx, w_idx;
		char *workbuffer = kzalloc(SPLPAR_MAXLENGTH, GFP_KERNEL);
		if (!workbuffer) {
			printk(KERN_ERR "%s %s kmalloc failure at line %d\n",
			       __FILE__, __func__, __LINE__);
			kfree(local_buffer);
			return;
		}
#ifdef LPARCFG_DEBUG
		printk(KERN_INFO "success calling get-system-parameter\n");
#endif
		/* First two bytes are a big-endian 16-bit payload length. */
		splpar_strlen = local_buffer[0] * 256 + local_buffer[1];
		local_buffer += 2;	/* step over strlen value */

		/*
		 * Copy characters into workbuffer one at a time; a ','
		 * or NUL terminates the current term, which is printed
		 * (unless empty) and the accumulator reset.  NOTE(review):
		 * the loop guard tests *local_buffer (the first payload
		 * byte) rather than local_buffer[idx] — termination relies
		 * on idx reaching splpar_strlen; confirm against firmware
		 * output that never starts with NUL.
		 */
		w_idx = 0;
		idx = 0;
		while ((*local_buffer) && (idx < splpar_strlen)) {
			workbuffer[w_idx++] = local_buffer[idx++];
			if ((local_buffer[idx] == ',')
			    || (local_buffer[idx] == '\0')) {
				workbuffer[w_idx] = '\0';
				if (w_idx) {
					/* avoid the empty string */
					seq_printf(m, "%s\n", workbuffer);
				}
				memset(workbuffer, 0, SPLPAR_MAXLENGTH);
				idx++;	/* skip the comma */
				w_idx = 0;
			} else if (local_buffer[idx] == '=') {
				/* code here to replace workbuffer contents
				   with different keyword strings */
				if (0 == strcmp(workbuffer, "MaxEntCap")) {
					strcpy(workbuffer,
					       "partition_max_entitled_capacity");
					w_idx = strlen(workbuffer);
				}
				if (0 == strcmp(workbuffer, "MaxPlatProcs")) {
					strcpy(workbuffer,
					       "system_potential_processors");
					w_idx = strlen(workbuffer);
				}
			}
		}
		kfree(workbuffer);
		/* undo the += 2 above so kfree() gets the original pointer */
		local_buffer -= 2;	/* back up over strlen value */
	}
	kfree(local_buffer);
}
365
366/* Return the number of processors in the system.
367 * This function reads through the device tree and counts
368 * the virtual processors, this does not include threads.
369 */
370static int lparcfg_count_active_processors(void)
371{
372 struct device_node *cpus_dn = NULL;
373 int count = 0;
374
375 while ((cpus_dn = of_find_node_by_type(cpus_dn, "cpu"))) {
376#ifdef LPARCFG_DEBUG
377 printk(KERN_ERR "cpus_dn %p\n", cpus_dn);
378#endif
379 count++;
380 }
381 return count;
382}
383
384static void pseries_cmo_data(struct seq_file *m)
385{
386 int cpu;
387 unsigned long cmo_faults = 0;
388 unsigned long cmo_fault_time = 0;
389
390 seq_printf(m, "cmo_enabled=%d\n", firmware_has_feature(FW_FEATURE_CMO));
391
392 if (!firmware_has_feature(FW_FEATURE_CMO))
393 return;
394
395 for_each_possible_cpu(cpu) {
396 cmo_faults += be64_to_cpu(lppaca_of(cpu).cmo_faults);
397 cmo_fault_time += be64_to_cpu(lppaca_of(cpu).cmo_fault_time);
398 }
399
400 seq_printf(m, "cmo_faults=%lu\n", cmo_faults);
401 seq_printf(m, "cmo_fault_time_usec=%lu\n",
402 cmo_fault_time / tb_ticks_per_usec);
403 seq_printf(m, "cmo_primary_psp=%d\n", cmo_get_primary_psp());
404 seq_printf(m, "cmo_secondary_psp=%d\n", cmo_get_secondary_psp());
405 seq_printf(m, "cmo_page_size=%lu\n", cmo_get_page_size());
406}
407
408static void splpar_dispatch_data(struct seq_file *m)
409{
410 int cpu;
411 unsigned long dispatches = 0;
412 unsigned long dispatch_dispersions = 0;
413
414 for_each_possible_cpu(cpu) {
415 dispatches += be32_to_cpu(lppaca_of(cpu).yield_count);
416 dispatch_dispersions +=
417 be32_to_cpu(lppaca_of(cpu).dispersion_count);
418 }
419
420 seq_printf(m, "dispatches=%lu\n", dispatches);
421 seq_printf(m, "dispatch_dispersions=%lu\n", dispatch_dispersions);
422}
423
424static void parse_em_data(struct seq_file *m)
425{
426 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
427
428 if (firmware_has_feature(FW_FEATURE_LPAR) &&
429 plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS)
430 seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]);
431}
432
/*
 * Core seq_file body for /proc/powerpc/lparcfg on pseries: emits
 * processor counts and, on shared-processor LPARs, the full set of
 * PPP/MPP/CMO/dispatch statistics.
 *
 * Returns 0 (seq_file convention).
 */
static int pseries_lparcfg_data(struct seq_file *m, void *v)
{
	int partition_potential_processors;
	int partition_active_processors;
	struct device_node *rtas_node;
	const int *lrdrp = NULL;

	/*
	 * ibm,lrdr-capacity cell 4 holds the maximum number of
	 * processors; fall back to the current count from vdso_data
	 * when the property is absent.  NOTE(review): the property is
	 * read as plain int — device-tree data is big-endian, so this
	 * presumably relies on a BE kernel; confirm for LE support.
	 */
	rtas_node = of_find_node_by_path("/rtas");
	if (rtas_node)
		lrdrp = of_get_property(rtas_node, "ibm,lrdr-capacity", NULL);

	if (lrdrp == NULL) {
		partition_potential_processors = vdso_data->processorCount;
	} else {
		partition_potential_processors = *(lrdrp + 4);
	}
	of_node_put(rtas_node);

	partition_active_processors = lparcfg_count_active_processors();

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		/* this call handles the ibm,get-system-parameter contents */
		parse_system_parameter_string(m);
		parse_ppp_data(m);
		parse_mpp_data(m);
		parse_mpp_x_data(m);
		pseries_cmo_data(m);
		splpar_dispatch_data(m);

		seq_printf(m, "purr=%ld\n", get_purr());
	} else {		/* non SPLPAR case */

		/*
		 * NOTE(review): system_active_processors is printed from
		 * the *potential* count here, not the active count —
		 * looks intentional for dedicated partitions but worth
		 * confirming.
		 */
		seq_printf(m, "system_active_processors=%d\n",
			   partition_potential_processors);

		seq_printf(m, "system_potential_processors=%d\n",
			   partition_potential_processors);

		seq_printf(m, "partition_max_entitled_capacity=%d\n",
			   partition_potential_processors * 100);

		seq_printf(m, "partition_entitled_capacity=%d\n",
			   partition_active_processors * 100);
	}

	seq_printf(m, "partition_active_processors=%d\n",
		   partition_active_processors);

	seq_printf(m, "partition_potential_processors=%d\n",
		   partition_potential_processors);

	seq_printf(m, "shared_processor_mode=%d\n",
		   lppaca_shared_proc(get_lppaca()));

	seq_printf(m, "slb_size=%d\n", mmu_slb_size);

	parse_em_data(m);

	return 0;
}
493
494static ssize_t update_ppp(u64 *entitlement, u8 *weight)
495{
496 struct hvcall_ppp_data ppp_data;
497 u8 new_weight;
498 u64 new_entitled;
499 ssize_t retval;
500
501 /* Get our current parameters */
502 retval = h_get_ppp(&ppp_data);
503 if (retval)
504 return retval;
505
506 if (entitlement) {
507 new_weight = ppp_data.weight;
508 new_entitled = *entitlement;
509 } else if (weight) {
510 new_weight = *weight;
511 new_entitled = ppp_data.entitlement;
512 } else
513 return -EINVAL;
514
515 pr_debug("%s: current_entitled = %llu, current_weight = %u\n",
516 __func__, ppp_data.entitlement, ppp_data.weight);
517
518 pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
519 __func__, new_entitled, new_weight);
520
521 retval = plpar_hcall_norets(H_SET_PPP, new_entitled, new_weight);
522 return retval;
523}
524
525/**
526 * update_mpp
527 *
528 * Update the memory entitlement and weight for the partition. Caller must
529 * specify either a new entitlement or weight, not both, to be updated
530 * since the h_set_mpp call takes both entitlement and weight as parameters.
531 */
532static ssize_t update_mpp(u64 *entitlement, u8 *weight)
533{
534 struct hvcall_mpp_data mpp_data;
535 u64 new_entitled;
536 u8 new_weight;
537 ssize_t rc;
538
539 if (entitlement) {
540 /* Check with vio to ensure the new memory entitlement
541 * can be handled.
542 */
543 rc = vio_cmo_entitlement_update(*entitlement);
544 if (rc)
545 return rc;
546 }
547
548 rc = h_get_mpp(&mpp_data);
549 if (rc)
550 return rc;
551
552 if (entitlement) {
553 new_weight = mpp_data.mem_weight;
554 new_entitled = *entitlement;
555 } else if (weight) {
556 new_weight = *weight;
557 new_entitled = mpp_data.entitled_mem;
558 } else
559 return -EINVAL;
560
561 pr_debug("%s: current_entitled = %lu, current_weight = %u\n",
562 __func__, mpp_data.entitled_mem, mpp_data.mem_weight);
563
564 pr_debug("%s: new_entitled = %llu, new_weight = %u\n",
565 __func__, new_entitled, new_weight);
566
567 rc = plpar_hcall_norets(H_SET_MPP, new_entitled, new_weight);
568 return rc;
569}
570
571/*
572 * Interface for changing system parameters (variable capacity weight
573 * and entitled capacity). Format of input is "param_name=value";
574 * anything after value is ignored. Valid parameters at this time are
575 * "partition_entitled_capacity" and "capacity_weight". We use
576 * H_SET_PPP to alter parameters.
577 *
578 * This function should be invoked only on systems with
579 * FW_FEATURE_SPLPAR.
580 */
581static ssize_t lparcfg_write(struct file *file, const char __user * buf,
582 size_t count, loff_t * off)
583{
584 int kbuf_sz = 64;
585 char kbuf[kbuf_sz];
586 char *tmp;
587 u64 new_entitled, *new_entitled_ptr = &new_entitled;
588 u8 new_weight, *new_weight_ptr = &new_weight;
589 ssize_t retval;
590
591 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
592 return -EINVAL;
593
594 if (count > kbuf_sz)
595 return -EINVAL;
596
597 if (copy_from_user(kbuf, buf, count))
598 return -EFAULT;
599
600 kbuf[count - 1] = '\0';
601 tmp = strchr(kbuf, '=');
602 if (!tmp)
603 return -EINVAL;
604
605 *tmp++ = '\0';
606
607 if (!strcmp(kbuf, "partition_entitled_capacity")) {
608 char *endp;
609 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
610 if (endp == tmp)
611 return -EINVAL;
612
613 retval = update_ppp(new_entitled_ptr, NULL);
614 } else if (!strcmp(kbuf, "capacity_weight")) {
615 char *endp;
616 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
617 if (endp == tmp)
618 return -EINVAL;
619
620 retval = update_ppp(NULL, new_weight_ptr);
621 } else if (!strcmp(kbuf, "entitled_memory")) {
622 char *endp;
623 *new_entitled_ptr = (u64) simple_strtoul(tmp, &endp, 10);
624 if (endp == tmp)
625 return -EINVAL;
626
627 retval = update_mpp(new_entitled_ptr, NULL);
628 } else if (!strcmp(kbuf, "entitled_memory_weight")) {
629 char *endp;
630 *new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
631 if (endp == tmp)
632 return -EINVAL;
633
634 retval = update_mpp(NULL, new_weight_ptr);
635 } else
636 return -EINVAL;
637
638 if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
639 retval = count;
640 } else if (retval == H_BUSY) {
641 retval = -EBUSY;
642 } else if (retval == H_HARDWARE) {
643 retval = -EIO;
644 } else if (retval == H_PARAMETER) {
645 retval = -EINVAL;
646 }
647
648 return retval;
649}
650
651static int lparcfg_data(struct seq_file *m, void *v)
652{
653 struct device_node *rootdn;
654 const char *model = "";
655 const char *system_id = "";
656 const char *tmp;
657 const unsigned int *lp_index_ptr;
658 unsigned int lp_index = 0;
659
660 seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
661
662 rootdn = of_find_node_by_path("/");
663 if (rootdn) {
664 tmp = of_get_property(rootdn, "model", NULL);
665 if (tmp)
666 model = tmp;
667 tmp = of_get_property(rootdn, "system-id", NULL);
668 if (tmp)
669 system_id = tmp;
670 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
671 NULL);
672 if (lp_index_ptr)
673 lp_index = *lp_index_ptr;
674 of_node_put(rootdn);
675 }
676 seq_printf(m, "serial_number=%s\n", system_id);
677 seq_printf(m, "system_type=%s\n", model);
678 seq_printf(m, "partition_id=%d\n", (int)lp_index);
679
680 return pseries_lparcfg_data(m, v);
681}
682
/* seq_file open hook: bind lparcfg_data() as the single-shot show routine. */
static int lparcfg_open(struct inode *inode, struct file *file)
{
	return single_open(file, lparcfg_data, NULL);
}
687
688static const struct file_operations lparcfg_fops = {
689 .read = seq_read,
690 .write = lparcfg_write,
691 .open = lparcfg_open,
692 .release = single_release,
693 .llseek = seq_lseek,
694};
695
696static int __init lparcfg_init(void)
697{
698 umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
699
700 /* Allow writing if we have FW_FEATURE_SPLPAR */
701 if (firmware_has_feature(FW_FEATURE_SPLPAR))
702 mode |= S_IWUSR;
703
704 if (!proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops)) {
705 printk(KERN_ERR "Failed to create powerpc/lparcfg\n");
706 return -EIO;
707 }
708 return 0;
709}
710machine_device_initcall(pseries, lparcfg_init);
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 3d01eee9ffb1..cde4e0a095ae 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -28,7 +28,7 @@ struct update_props_workarea {
28 u32 state; 28 u32 state;
29 u64 reserved; 29 u64 reserved;
30 u32 nprops; 30 u32 nprops;
31}; 31} __packed;
32 32
33#define NODE_ACTION_MASK 0xff000000 33#define NODE_ACTION_MASK 0xff000000
34#define NODE_COUNT_MASK 0x00ffffff 34#define NODE_COUNT_MASK 0x00ffffff
@@ -62,6 +62,7 @@ static int delete_dt_node(u32 phandle)
62 return -ENOENT; 62 return -ENOENT;
63 63
64 dlpar_detach_node(dn); 64 dlpar_detach_node(dn);
65 of_node_put(dn);
65 return 0; 66 return 0;
66} 67}
67 68
@@ -119,7 +120,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
119 120
120 if (!more) { 121 if (!more) {
121 of_update_property(dn, new_prop); 122 of_update_property(dn, new_prop);
122 new_prop = NULL; 123 *prop = NULL;
123 } 124 }
124 125
125 return 0; 126 return 0;
@@ -130,7 +131,7 @@ static int update_dt_node(u32 phandle, s32 scope)
130 struct update_props_workarea *upwa; 131 struct update_props_workarea *upwa;
131 struct device_node *dn; 132 struct device_node *dn;
132 struct property *prop = NULL; 133 struct property *prop = NULL;
133 int i, rc; 134 int i, rc, rtas_rc;
134 char *prop_data; 135 char *prop_data;
135 char *rtas_buf; 136 char *rtas_buf;
136 int update_properties_token; 137 int update_properties_token;
@@ -154,25 +155,26 @@ static int update_dt_node(u32 phandle, s32 scope)
154 upwa->phandle = phandle; 155 upwa->phandle = phandle;
155 156
156 do { 157 do {
157 rc = mobility_rtas_call(update_properties_token, rtas_buf, 158 rtas_rc = mobility_rtas_call(update_properties_token, rtas_buf,
158 scope); 159 scope);
159 if (rc < 0) 160 if (rtas_rc < 0)
160 break; 161 break;
161 162
162 prop_data = rtas_buf + sizeof(*upwa); 163 prop_data = rtas_buf + sizeof(*upwa);
163 164
164 /* The first element of the buffer is the path of the node 165 /* On the first call to ibm,update-properties for a node the
165 * being updated in the form of a 8 byte string length 166 * the first property value descriptor contains an empty
166 * followed by the string. Skip past this to get to the 167 * property name, the property value length encoded as u32,
167 * properties being updated. 168 * and the property value is the node path being updated.
168 */ 169 */
169 vd = *prop_data++; 170 if (*prop_data == 0) {
170 prop_data += vd; 171 prop_data++;
172 vd = *(u32 *)prop_data;
173 prop_data += vd + sizeof(vd);
174 upwa->nprops--;
175 }
171 176
172 /* The path we skipped over is counted as one of the elements 177 for (i = 0; i < upwa->nprops; i++) {
173 * returned so start counting at one.
174 */
175 for (i = 1; i < upwa->nprops; i++) {
176 char *prop_name; 178 char *prop_name;
177 179
178 prop_name = prop_data; 180 prop_name = prop_data;
@@ -202,7 +204,7 @@ static int update_dt_node(u32 phandle, s32 scope)
202 prop_data += vd; 204 prop_data += vd;
203 } 205 }
204 } 206 }
205 } while (rc == 1); 207 } while (rtas_rc == 1);
206 208
207 of_node_put(dn); 209 of_node_put(dn);
208 kfree(rtas_buf); 210 kfree(rtas_buf);
@@ -215,17 +217,14 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
215 struct device_node *parent_dn; 217 struct device_node *parent_dn;
216 int rc; 218 int rc;
217 219
218 dn = dlpar_configure_connector(drc_index); 220 parent_dn = of_find_node_by_phandle(parent_phandle);
219 if (!dn) 221 if (!parent_dn)
220 return -ENOENT; 222 return -ENOENT;
221 223
222 parent_dn = of_find_node_by_phandle(parent_phandle); 224 dn = dlpar_configure_connector(drc_index, parent_dn);
223 if (!parent_dn) { 225 if (!dn)
224 dlpar_free_cc_nodes(dn);
225 return -ENOENT; 226 return -ENOENT;
226 }
227 227
228 dn->parent = parent_dn;
229 rc = dlpar_attach_node(dn); 228 rc = dlpar_attach_node(dn);
230 if (rc) 229 if (rc)
231 dlpar_free_cc_nodes(dn); 230 dlpar_free_cc_nodes(dn);
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
deleted file mode 100644
index f35787b6a5e0..000000000000
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ /dev/null
@@ -1,324 +0,0 @@
1#ifndef _PSERIES_PLPAR_WRAPPERS_H
2#define _PSERIES_PLPAR_WRAPPERS_H
3
4#include <linux/string.h>
5#include <linux/irqflags.h>
6
7#include <asm/hvcall.h>
8#include <asm/paca.h>
9#include <asm/page.h>
10
11/* Get state of physical CPU from query_cpu_stopped */
12int smp_query_cpu_stopped(unsigned int pcpu);
13#define QCSS_STOPPED 0
14#define QCSS_STOPPING 1
15#define QCSS_NOT_STOPPED 2
16#define QCSS_HARDWARE_ERROR -1
17#define QCSS_HARDWARE_BUSY -2
18
19static inline long poll_pending(void)
20{
21 return plpar_hcall_norets(H_POLL_PENDING);
22}
23
24static inline u8 get_cede_latency_hint(void)
25{
26 return get_lppaca()->cede_latency_hint;
27}
28
29static inline void set_cede_latency_hint(u8 latency_hint)
30{
31 get_lppaca()->cede_latency_hint = latency_hint;
32}
33
34static inline long cede_processor(void)
35{
36 return plpar_hcall_norets(H_CEDE);
37}
38
39static inline long extended_cede_processor(unsigned long latency_hint)
40{
41 long rc;
42 u8 old_latency_hint = get_cede_latency_hint();
43
44 set_cede_latency_hint(latency_hint);
45
46 rc = cede_processor();
47#ifdef CONFIG_TRACE_IRQFLAGS
48 /* Ensure that H_CEDE returns with IRQs on */
49 if (WARN_ON(!(mfmsr() & MSR_EE)))
50 __hard_irq_enable();
51#endif
52
53 set_cede_latency_hint(old_latency_hint);
54
55 return rc;
56}
57
58static inline long vpa_call(unsigned long flags, unsigned long cpu,
59 unsigned long vpa)
60{
61 flags = flags << H_VPA_FUNC_SHIFT;
62
63 return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
64}
65
66static inline long unregister_vpa(unsigned long cpu)
67{
68 return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
69}
70
71static inline long register_vpa(unsigned long cpu, unsigned long vpa)
72{
73 return vpa_call(H_VPA_REG_VPA, cpu, vpa);
74}
75
76static inline long unregister_slb_shadow(unsigned long cpu)
77{
78 return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
79}
80
81static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
82{
83 return vpa_call(H_VPA_REG_SLB, cpu, vpa);
84}
85
86static inline long unregister_dtl(unsigned long cpu)
87{
88 return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
89}
90
91static inline long register_dtl(unsigned long cpu, unsigned long vpa)
92{
93 return vpa_call(H_VPA_REG_DTL, cpu, vpa);
94}
95
96static inline long plpar_page_set_loaned(unsigned long vpa)
97{
98 unsigned long cmo_page_sz = cmo_get_page_size();
99 long rc = 0;
100 int i;
101
102 for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
103 rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
104
105 for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
106 plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
107 vpa + i - cmo_page_sz, 0);
108
109 return rc;
110}
111
112static inline long plpar_page_set_active(unsigned long vpa)
113{
114 unsigned long cmo_page_sz = cmo_get_page_size();
115 long rc = 0;
116 int i;
117
118 for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
119 rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
120
121 for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
122 plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
123 vpa + i - cmo_page_sz, 0);
124
125 return rc;
126}
127
128extern void vpa_init(int cpu);
129
130static inline long plpar_pte_enter(unsigned long flags,
131 unsigned long hpte_group, unsigned long hpte_v,
132 unsigned long hpte_r, unsigned long *slot)
133{
134 long rc;
135 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
136
137 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
138
139 *slot = retbuf[0];
140
141 return rc;
142}
143
144static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
145 unsigned long avpn, unsigned long *old_pteh_ret,
146 unsigned long *old_ptel_ret)
147{
148 long rc;
149 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
150
151 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
152
153 *old_pteh_ret = retbuf[0];
154 *old_ptel_ret = retbuf[1];
155
156 return rc;
157}
158
159/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
160static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
161 unsigned long avpn, unsigned long *old_pteh_ret,
162 unsigned long *old_ptel_ret)
163{
164 long rc;
165 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
166
167 rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
168
169 *old_pteh_ret = retbuf[0];
170 *old_ptel_ret = retbuf[1];
171
172 return rc;
173}
174
175static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
176 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
177{
178 long rc;
179 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
180
181 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
182
183 *old_pteh_ret = retbuf[0];
184 *old_ptel_ret = retbuf[1];
185
186 return rc;
187}
188
189/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
190static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
191 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
192{
193 long rc;
194 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
195
196 rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
197
198 *old_pteh_ret = retbuf[0];
199 *old_ptel_ret = retbuf[1];
200
201 return rc;
202}
203
204/*
205 * plpar_pte_read_4_raw can be called in real mode.
206 * ptes must be 8*sizeof(unsigned long)
207 */
208static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
209 unsigned long *ptes)
210
211{
212 long rc;
213 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
214
215 rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
216
217 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
218
219 return rc;
220}
221
222static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
223 unsigned long avpn)
224{
225 return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
226}
227
228static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
229 unsigned long *tce_ret)
230{
231 long rc;
232 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
233
234 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
235
236 *tce_ret = retbuf[0];
237
238 return rc;
239}
240
241static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
242 unsigned long tceval)
243{
244 return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
245}
246
247static inline long plpar_tce_put_indirect(unsigned long liobn,
248 unsigned long ioba, unsigned long page, unsigned long count)
249{
250 return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
251}
252
253static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
254 unsigned long tceval, unsigned long count)
255{
256 return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
257}
258
259static inline long plpar_get_term_char(unsigned long termno,
260 unsigned long *len_ret, char *buf_ret)
261{
262 long rc;
263 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
264 unsigned long *lbuf = (unsigned long *)buf_ret; /* TODO: alignment? */
265
266 rc = plpar_hcall(H_GET_TERM_CHAR, retbuf, termno);
267
268 *len_ret = retbuf[0];
269 lbuf[0] = retbuf[1];
270 lbuf[1] = retbuf[2];
271
272 return rc;
273}
274
275static inline long plpar_put_term_char(unsigned long termno, unsigned long len,
276 const char *buffer)
277{
278 unsigned long *lbuf = (unsigned long *)buffer; /* TODO: alignment? */
279 return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len, lbuf[0],
280 lbuf[1]);
281}
282
283/* Set various resource mode parameters */
284static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
285 unsigned long value1, unsigned long value2)
286{
287 return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
288}
289
290/*
291 * Enable relocation on exceptions on this partition
292 *
293 * Note: this call has a partition wide scope and can take a while to complete.
294 * If it returns H_LONG_BUSY_* it should be retried periodically until it
295 * returns H_SUCCESS.
296 */
297static inline long enable_reloc_on_exceptions(void)
298{
299 /* mflags = 3: Exceptions at 0xC000000000004000 */
300 return plpar_set_mode(3, 3, 0, 0);
301}
302
303/*
304 * Disable relocation on exceptions on this partition
305 *
306 * Note: this call has a partition wide scope and can take a while to complete.
307 * If it returns H_LONG_BUSY_* it should be retried periodically until it
308 * returns H_SUCCESS.
309 */
310static inline long disable_reloc_on_exceptions(void) {
311 return plpar_set_mode(0, 3, 0, 0);
312}
313
314static inline long plapr_set_ciabr(unsigned long ciabr)
315{
316 return plpar_set_mode(0, 1, ciabr, 0);
317}
318
319static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
320{
321 return plpar_set_mode(0, 2, dawr0, dawrx0);
322}
323
324#endif /* _PSERIES_PLPAR_WRAPPERS_H */
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 4644efa06941..a166e38bd683 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -18,9 +18,7 @@
18#include <asm/machdep.h> 18#include <asm/machdep.h>
19#include <asm/firmware.h> 19#include <asm/firmware.h>
20#include <asm/runlatch.h> 20#include <asm/runlatch.h>
21 21#include <asm/plpar_wrappers.h>
22#include "plpar_wrappers.h"
23#include "pseries.h"
24 22
25struct cpuidle_driver pseries_idle_driver = { 23struct cpuidle_driver pseries_idle_driver = {
26 .name = "pseries_idle", 24 .name = "pseries_idle",
@@ -45,7 +43,11 @@ static inline void idle_loop_prolog(unsigned long *in_purr)
45 43
46static inline void idle_loop_epilog(unsigned long in_purr) 44static inline void idle_loop_epilog(unsigned long in_purr)
47{ 45{
48 get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr; 46 u64 wait_cycles;
47
48 wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
49 wait_cycles += mfspr(SPRN_PURR) - in_purr;
50 get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
49 get_lppaca()->idle = 0; 51 get_lppaca()->idle = 0;
50} 52}
51 53
@@ -308,7 +310,7 @@ static int pseries_idle_probe(void)
308 return -EPERM; 310 return -EPERM;
309 } 311 }
310 312
311 if (get_lppaca()->shared_proc) 313 if (lppaca_shared_proc(get_lppaca()))
312 cpuidle_state_table = shared_states; 314 cpuidle_state_table = shared_states;
313 else 315 else
314 cpuidle_state_table = dedicated_states; 316 cpuidle_state_table = dedicated_states;
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index c2a3a258001c..99219530ea4a 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -56,13 +56,10 @@ extern void hvc_vio_init_early(void);
56/* Dynamic logical Partitioning/Mobility */ 56/* Dynamic logical Partitioning/Mobility */
57extern void dlpar_free_cc_nodes(struct device_node *); 57extern void dlpar_free_cc_nodes(struct device_node *);
58extern void dlpar_free_cc_property(struct property *); 58extern void dlpar_free_cc_property(struct property *);
59extern struct device_node *dlpar_configure_connector(u32); 59extern struct device_node *dlpar_configure_connector(u32, struct device_node *);
60extern int dlpar_attach_node(struct device_node *); 60extern int dlpar_attach_node(struct device_node *);
61extern int dlpar_detach_node(struct device_node *); 61extern int dlpar_detach_node(struct device_node *);
62 62
63/* Snooze Delay, pseries_idle */
64DECLARE_PER_CPU(long, smt_snooze_delay);
65
66/* PCI root bridge prepare function override for pseries */ 63/* PCI root bridge prepare function override for pseries */
67struct pci_host_bridge; 64struct pci_host_bridge;
68int pseries_root_bridge_prepare(struct pci_host_bridge *bridge); 65int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c
index a91e6dadda2c..92767791f93b 100644
--- a/arch/powerpc/platforms/pseries/pseries_energy.c
+++ b/arch/powerpc/platforms/pseries/pseries_energy.c
@@ -108,8 +108,8 @@ err:
108 * energy consumption. 108 * energy consumption.
109 */ 109 */
110 110
111#define FLAGS_MODE1 0x004E200000080E01 111#define FLAGS_MODE1 0x004E200000080E01UL
112#define FLAGS_MODE2 0x004E200000080401 112#define FLAGS_MODE2 0x004E200000080401UL
113#define FLAGS_ACTIVATE 0x100 113#define FLAGS_ACTIVATE 0x100
114 114
115static ssize_t get_best_energy_list(char *page, int activate) 115static ssize_t get_best_energy_list(char *page, int activate)
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index c11c8238797c..d64feb3ea0be 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -66,8 +66,8 @@
66#include <asm/firmware.h> 66#include <asm/firmware.h>
67#include <asm/eeh.h> 67#include <asm/eeh.h>
68#include <asm/reg.h> 68#include <asm/reg.h>
69#include <asm/plpar_wrappers.h>
69 70
70#include "plpar_wrappers.h"
71#include "pseries.h" 71#include "pseries.h"
72 72
73int CMO_PrPSP = -1; 73int CMO_PrPSP = -1;
@@ -183,7 +183,7 @@ static void __init pseries_mpic_init_IRQ(void)
183 np = of_find_node_by_path("/"); 183 np = of_find_node_by_path("/");
184 naddr = of_n_addr_cells(np); 184 naddr = of_n_addr_cells(np);
185 opprop = of_get_property(np, "platform-open-pic", &opplen); 185 opprop = of_get_property(np, "platform-open-pic", &opplen);
186 if (opprop != 0) { 186 if (opprop != NULL) {
187 openpic_addr = of_read_number(opprop, naddr); 187 openpic_addr = of_read_number(opprop, naddr);
188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 188 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
189 } 189 }
@@ -323,7 +323,7 @@ static int alloc_dispatch_logs(void)
323 get_paca()->lppaca_ptr->dtl_idx = 0; 323 get_paca()->lppaca_ptr->dtl_idx = 0;
324 324
325 /* hypervisor reads buffer length from this field */ 325 /* hypervisor reads buffer length from this field */
326 dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; 326 dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES);
327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); 327 ret = register_dtl(hard_smp_processor_id(), __pa(dtl));
328 if (ret) 328 if (ret)
329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed " 329 pr_err("WARNING: DTL registration of cpu %d (hw %d) failed "
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 306643cc9dbc..1c1771a40250 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -43,8 +43,8 @@
43#include <asm/cputhreads.h> 43#include <asm/cputhreads.h>
44#include <asm/xics.h> 44#include <asm/xics.h>
45#include <asm/dbell.h> 45#include <asm/dbell.h>
46#include <asm/plpar_wrappers.h>
46 47
47#include "plpar_wrappers.h"
48#include "pseries.h" 48#include "pseries.h"
49#include "offline_states.h" 49#include "offline_states.h"
50 50
@@ -187,22 +187,6 @@ static int smp_pSeries_kick_cpu(int nr)
187 return 0; 187 return 0;
188} 188}
189 189
190static int smp_pSeries_cpu_bootable(unsigned int nr)
191{
192 /* Special case - we inhibit secondary thread startup
193 * during boot if the user requests it.
194 */
195 if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
196 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
197 return 0;
198 if (smt_enabled_at_boot
199 && cpu_thread_in_core(nr) >= smt_enabled_at_boot)
200 return 0;
201 }
202
203 return 1;
204}
205
206/* Only used on systems that support multiple IPI mechanisms */ 190/* Only used on systems that support multiple IPI mechanisms */
207static void pSeries_cause_ipi_mux(int cpu, unsigned long data) 191static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
208{ 192{
@@ -237,7 +221,7 @@ static struct smp_ops_t pSeries_xics_smp_ops = {
237 .probe = pSeries_smp_probe, 221 .probe = pSeries_smp_probe,
238 .kick_cpu = smp_pSeries_kick_cpu, 222 .kick_cpu = smp_pSeries_kick_cpu,
239 .setup_cpu = smp_xics_setup_cpu, 223 .setup_cpu = smp_xics_setup_cpu,
240 .cpu_bootable = smp_pSeries_cpu_bootable, 224 .cpu_bootable = smp_generic_cpu_bootable,
241}; 225};
242 226
243/* This is called very early */ 227/* This is called very early */