Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/futex.h                 | 27
-rw-r--r--  arch/powerpc/include/asm/ioctls.h                |  1
-rw-r--r--  arch/powerpc/include/asm/lppaca.h                | 16
-rw-r--r--  arch/powerpc/include/asm/machdep.h               |  6
-rw-r--r--  arch/powerpc/include/asm/rwsem.h                 | 51
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c              |  5
-rw-r--r--  arch/powerpc/kernel/paca.c                       | 14
-rw-r--r--  arch/powerpc/kernel/process.c                    |  8
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S                |  2
-rw-r--r--  arch/powerpc/mm/numa.c                           |  3
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c                     |  6
-rw-r--r--  arch/powerpc/platforms/cell/spufs/syscalls.c     |  2
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c              |  6
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c           |  1
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c  | 66
15 files changed, 111 insertions, 103 deletions
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 7c589ef81fb0..c94e4a3fe2ef 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -30,7 +30,7 @@
         : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
         : "cr0", "memory")
 
-static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 {
         int op = (encoded_op >> 28) & 7;
         int cmp = (encoded_op >> 24) & 15;
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
         if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                 oparg = 1 << oparg;
 
-        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+        if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         pagefault_disable();
@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
 }
 
 static inline int
-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                              u32 oldval, u32 newval)
 {
-        int prev;
+        int ret = 0;
+        u32 prev;
 
-        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                 return -EFAULT;
 
         __asm__ __volatile__ (
         PPC_RELEASE_BARRIER
-"1:     lwarx   %0,0,%2         # futex_atomic_cmpxchg_inatomic\n\
-        cmpw    0,%0,%3\n\
+"1:     lwarx   %1,0,%3         # futex_atomic_cmpxchg_inatomic\n\
+        cmpw    0,%1,%4\n\
         bne-    3f\n"
-        PPC405_ERR77(0,%2)
-"2:     stwcx.  %4,0,%2\n\
+        PPC405_ERR77(0,%3)
+"2:     stwcx.  %5,0,%3\n\
         bne-    1b\n"
         PPC_ACQUIRE_BARRIER
"3:     .section .fixup,\"ax\"\n\
-4:      li      %0,%5\n\
+4:      li      %0,%6\n\
         b       3b\n\
         .previous\n\
         .section __ex_table,\"a\"\n\
         .align 3\n\
         " PPC_LONG "1b,4b,2b,4b\n\
         .previous" \
-        : "=&r" (prev), "+m" (*uaddr)
+        : "+r" (ret), "=&r" (prev), "+m" (*uaddr)
         : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)
         : "cc", "memory");
 
-        return prev;
+        *uval = prev;
+        return ret;
 }
 
 #endif /* __KERNEL__ */
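Note on the change above: the reworked helper reports faults through its return value and hands back the word it actually read via *uval, instead of multiplexing both into a single int. A minimal caller sketch under that assumption (the wrapper name and the -EAGAIN retry policy are illustrative, not taken from the kernel's futex core):

/* Sketch only: using the new calling convention, where the return value is
 * the fault status and the observed word comes back through *uval.
 */
static int try_futex_cmpxchg(u32 __user *uaddr, u32 expected, u32 desired)
{
        u32 curval;
        int ret;

        ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, expected, desired);
        if (ret)
                return ret;             /* -EFAULT: user page was not accessible */
        if (curval != expected)
                return -EAGAIN;         /* someone else changed the word; caller may retry */
        return 0;                       /* exchange performed */
}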
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h
index c7dc17cf84f1..e9b78870aaab 100644
--- a/arch/powerpc/include/asm/ioctls.h
+++ b/arch/powerpc/include/asm/ioctls.h
@@ -96,6 +96,7 @@
 #define TIOCSPTLCK      _IOW('T',0x31, int)  /* Lock/unlock Pty */
 #define TIOCGDEV        _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */
 #define TIOCSIG         _IOW('T',0x36, int)  /* Generate signal on Pty slave */
+#define TIOCVHANGUP     0x5437
 
 #define TIOCSERCONFIG   0x5453
 #define TIOCSERGWILD    0x5454
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 380d48bacd16..26b8c807f8f1 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -33,9 +33,25 @@
 //
 //----------------------------------------------------------------------------
 #include <linux/cache.h>
+#include <linux/threads.h>
 #include <asm/types.h>
 #include <asm/mmu.h>
 
+/*
+ * We only have to have statically allocated lppaca structs on
+ * legacy iSeries, which supports at most 64 cpus.
+ */
+#ifdef CONFIG_PPC_ISERIES
+#if NR_CPUS < 64
+#define NR_LPPACAS      NR_CPUS
+#else
+#define NR_LPPACAS      64
+#endif
+#else /* not iSeries */
+#define NR_LPPACAS      1
+#endif
+
+
 /* The Hypervisor barfs if the lppaca crosses a page boundary.  A 1k
  * alignment is sufficient to prevent this */
 struct lppaca {
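Hoisting NR_LPPACAS into the header lets the statically allocated lppaca array and the iSeries code that walks it agree on one bound. A rough sketch of that use, assuming the array itself still lives in arch/powerpc/kernel/paca.c (the accessor below is purely hypothetical):

#include <asm/lppaca.h>

/* Sketch: one statically allocated entry per legacy-iSeries CPU (capped at
 * 64), and a single entry everywhere else. Field initializers omitted.
 */
struct lppaca lppaca[NR_LPPACAS];

/* Hypothetical bounds-checked accessor, for illustration only. */
static inline struct lppaca *lppaca_entry(int cpu)
{
        return &lppaca[cpu < NR_LPPACAS ? cpu : 0];
}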
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
          * claims to support kexec.
          */
         int (*machine_kexec_prepare)(struct kimage *image);
+
+        /* Called to perform the _real_ kexec.
+         * Do NOT allocate memory or fail here. We are past the point of
+         * no return.
+         */
+        void (*machine_kexec)(struct kimage *image);
 #endif /* CONFIG_KEXEC */
 
 #ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h
index 8447d89fbe72..bb1e2cdeb9bf 100644
--- a/arch/powerpc/include/asm/rwsem.h
+++ b/arch/powerpc/include/asm/rwsem.h
@@ -13,11 +13,6 @@
  * by Paul Mackerras <paulus@samba.org>.
  */
 
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <asm/atomic.h>
-#include <asm/system.h>
-
 /*
  * the semaphore definition
  */
@@ -33,47 +28,6 @@
 #define RWSEM_ACTIVE_READ_BIAS          RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS         (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
-struct rw_semaphore {
-        long                    count;
-        spinlock_t              wait_lock;
-        struct list_head        wait_list;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-        struct lockdep_map      dep_map;
-#endif
-};
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
-#else
-# define __RWSEM_DEP_MAP_INIT(lockname)
-#endif
-
-#define __RWSEM_INITIALIZER(name)                               \
-{                                                               \
-        RWSEM_UNLOCKED_VALUE,                                   \
-        __SPIN_LOCK_UNLOCKED((name).wait_lock),                 \
-        LIST_HEAD_INIT((name).wait_list)                        \
-        __RWSEM_DEP_MAP_INIT(name)                              \
-}
-
-#define DECLARE_RWSEM(name)             \
-        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
-
-extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
-extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-
-extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
-                         struct lock_class_key *key);
-
-#define init_rwsem(sem)                                 \
-        do {                                            \
-                static struct lock_class_key __key;     \
-                                                        \
-                __init_rwsem((sem), #sem, &__key);      \
-        } while (0)
-
 /*
  * lock for reading
  */
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
         return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
 }
 
-static inline int rwsem_is_locked(struct rw_semaphore *sem)
-{
-        return sem->count != 0;
-}
-
 #endif  /* __KERNEL__ */
 #endif  /* _ASM_POWERPC_RWSEM_H */
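Callers are unaffected by this cleanup: the structure, initializers and slow-path declarations removed here are now picked up from the generic <linux/rwsem.h>. A small usage sketch, with a hypothetical semaphore name and surrounding functions:

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_sem);      /* hypothetical semaphore */

static void example_reader(void)
{
        down_read(&example_sem);
        /* ... read the shared state ... */
        up_read(&example_sem);
}

static void example_writer(void)
{
        down_write(&example_sem);
        /* ... update the shared state ... */
        up_write(&example_sem);
}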
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
 
         save_ftrace_enabled = __ftrace_enabled_save();
 
-        default_machine_kexec(image);
+        if (ppc_md.machine_kexec)
+                ppc_md.machine_kexec(image);
+        else
+                default_machine_kexec(image);
 
         __ftrace_enabled_restore(save_ftrace_enabled);
 
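Together with the new machdep_calls member above, this lets a platform take over the final kexec step while keeping the generic path as the fallback. A hedged sketch of how a board file might wire it up (the platform name and quiesce step are made up for illustration; no in-tree platform is implied):

/* Hypothetical platform code, not part of this patch. */
static void example_machine_kexec(struct kimage *image)
{
        /* Quiesce platform-specific hardware here. No allocations and no
         * failure paths: we are past the point of no return.
         */
        default_machine_kexec(image);   /* or a fully platform-specific path */
}

define_machine(example) {
        .name                   = "example-board",
        .machine_kexec          = example_machine_kexec,
};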
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846f3c3b..f4adf89d7614 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@ extern unsigned long __toc_start;
 #ifdef CONFIG_PPC_BOOK3S
 
 /*
- * We only have to have statically allocated lppaca structs on
- * legacy iSeries, which supports at most 64 cpus.
- */
-#ifdef CONFIG_PPC_ISERIES
-#if NR_CPUS < 64
-#define NR_LPPACAS      NR_CPUS
-#else
-#define NR_LPPACAS      64
-#endif
-#else /* not iSeries */
-#define NR_LPPACAS      1
-#endif
-
-/*
  * The structure which the hypervisor knows about - this structure
  * should not cross a page boundary.  The vpa_init/register_vpa call
  * is now known to fail if the lppaca structure crosses a page
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
                 prime_debug_regs(new_thread);
 }
 #else   /* !CONFIG_PPC_ADV_DEBUG_REGS */
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
 static void set_debug_reg_defaults(struct thread_struct *thread)
 {
         if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
                 set_dabr(0);
         }
 }
+#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
 #endif  /* CONFIG_PPC_ADV_DEBUG_REGS */
 
 int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
 {
         discard_lazy_cpu_state();
 
-#ifdef CONFIG_HAVE_HW_BREAKPOINTS
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
         flush_ptrace_hw_breakpoint(current);
-#else /* CONFIG_HAVE_HW_BREAKPOINTS */
+#else /* CONFIG_HAVE_HW_BREAKPOINT */
         set_debug_reg_defaults(&current->thread);
-#endif /* CONFIG_HAVE_HW_BREAKPOINTS */
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
 }
 
 void
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 8a0deefac08d..b9150f07d266 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -160,7 +160,7 @@ SECTIONS
                 INIT_RAM_FS
         }
 
-        PERCPU(PAGE_SIZE)
+        PERCPU(L1_CACHE_BYTES, PAGE_SIZE)
 
         . = ALIGN(8);
         .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index fd4812329570..0dc95c0aa3be 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
 {
         int rc = 0;
 
-        if (firmware_has_feature(FW_FEATURE_VPHN) &&
+        /* Disabled until races with load balancing are fixed */
+        if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
             get_lppaca()->shared_proc) {
                 vphn_enabled = 1;
                 setup_cpu_associativity_change_counters();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
  * neesd to be flushed. This function will either perform the flush
  * immediately or will batch it up if the current CPU has an active
  * batch on it.
- *
- * Must be called from within some kind of spinlock/non-preempt region...
  */
 void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, unsigned long pte, int huge)
 {
-        struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch);
+        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
         unsigned long vsid, vaddr;
         unsigned int psize;
         int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
          */
         if (!batch->active) {
                 flush_hash_page(vaddr, rpte, psize, ssize, 0);
+                put_cpu_var(ppc64_tlb_batch);
                 return;
         }
 
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
         batch->index = ++i;
         if (i >= PPC64_TLB_BATCH_NR)
                 __flush_tlb_pending(batch);
+        put_cpu_var(ppc64_tlb_batch);
 }
 
 /*
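The pattern behind this change: get_cpu_var() takes the per-CPU batch with preemption disabled, so every exit path out of hpte_need_flush() must now be paired with put_cpu_var(). A simplified, hypothetical illustration of the pairing (not the actual function body):

static void percpu_batch_pattern_example(void)
{
        /* get_cpu_var() disables preemption until the matching put_cpu_var() */
        struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);

        if (!batch->active) {
                /* the early-return path must also re-enable preemption */
                put_cpu_var(ppc64_tlb_batch);
                return;
        }

        /* ... queue the entry, flushing the batch if it is full ... */
        put_cpu_var(ppc64_tlb_batch);
}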
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 187a7d32f86a..a3d2ce54ea2e 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
         if (!IS_ERR(tmp)) {
                 struct nameidata nd;
 
-                ret = path_lookup(tmp, LOOKUP_PARENT, &nd);
+                ret = kern_path_parent(tmp, &nd);
                 if (!ret) {
                         nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE;
                         ret = spufs_create(&nd, flags, mode, neighbor);
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index fdb7384c0c4f..f0491cc28900 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
         pft_size[0] = 0;                /* NUMA CEC cookie, 0 for non NUMA */
         pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
 
-        for (i = 0; i < NR_CPUS; i++) {
-                if (lppaca_of(i).dyn_proc_status >= 2)
+        for (i = 0; i < NR_LPPACAS; i++) {
+                if (lppaca[i].dyn_proc_status >= 2)
                         continue;
 
                 snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
 
                 dt_prop_str(dt, "device_type", device_type_cpu);
 
-                index = lppaca_of(i).dyn_hv_phys_proc_index;
+                index = lppaca[i].dyn_hv_phys_proc_index;
                 d = &xIoHriProcessorVpd[index];
 
                 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b0863410517f..2946ae10fbfd 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
          * on but calling this function multiple times is fine.
          */
         identify_cpu(0, mfspr(SPRN_PVR));
+        initialise_paca(&boot_paca, 0);
 
         powerpc_firmware_features |= FW_FEATURE_ISERIES;
         powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index bc8803664140..33867ec4a234 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -17,6 +17,54 @@
 #include <asm/pSeries_reconfig.h>
 #include <asm/sparsemem.h>
 
+static unsigned long get_memblock_size(void)
+{
+        struct device_node *np;
+        unsigned int memblock_size = 0;
+
+        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+        if (np) {
+                const unsigned long *size;
+
+                size = of_get_property(np, "ibm,lmb-size", NULL);
+                memblock_size = size ? *size : 0;
+
+                of_node_put(np);
+        } else {
+                unsigned int memzero_size = 0;
+                const unsigned int *regs;
+
+                np = of_find_node_by_path("/memory@0");
+                if (np) {
+                        regs = of_get_property(np, "reg", NULL);
+                        memzero_size = regs ? regs[3] : 0;
+                        of_node_put(np);
+                }
+
+                if (memzero_size) {
+                        /* We now know the size of memory@0, use this to find
+                         * the first memoryblock and get its size.
+                         */
+                        char buf[64];
+
+                        sprintf(buf, "/memory@%x", memzero_size);
+                        np = of_find_node_by_path(buf);
+                        if (np) {
+                                regs = of_get_property(np, "reg", NULL);
+                                memblock_size = regs ? regs[3] : 0;
+                                of_node_put(np);
+                        }
+                }
+        }
+
+        return memblock_size;
+}
+
+unsigned long memory_block_size_bytes(void)
+{
+        return get_memblock_size();
+}
+
 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
         unsigned long start, start_pfn;
@@ -127,30 +175,22 @@ static int pseries_add_memory(struct device_node *np)
 
 static int pseries_drconf_memory(unsigned long *base, unsigned int action)
 {
-        struct device_node *np;
-        const unsigned long *lmb_size;
+        unsigned long memblock_size;
         int rc;
 
-        np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
-        if (!np)
+        memblock_size = get_memblock_size();
+        if (!memblock_size)
                 return -EINVAL;
 
-        lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
-        if (!lmb_size) {
-                of_node_put(np);
-                return -EINVAL;
-        }
-
         if (action == PSERIES_DRCONF_MEM_ADD) {
-                rc = memblock_add(*base, *lmb_size);
+                rc = memblock_add(*base, memblock_size);
                 rc = (rc < 0) ? -EINVAL : 0;
         } else if (action == PSERIES_DRCONF_MEM_REMOVE) {
-                rc = pseries_remove_memblock(*base, *lmb_size);
+                rc = pseries_remove_memblock(*base, memblock_size);
         } else {
                 rc = -EINVAL;
         }
 
-        of_node_put(np);
         return rc;
 }
 
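A note on the device-tree fallback in get_memblock_size() above: on 64-bit pseries the memory nodes use two address cells and two size cells, so a "reg" property is four 32-bit cells and regs[3] holds the low word of the block size; the exported memory_block_size_bytes() then gives the generic memory sysfs code a way to use the platform's LMB size instead of its built-in default. A small sketch of that cell-layout assumption (the helper name is hypothetical):

/* Sketch: "reg" = <addr_hi addr_lo size_hi size_lo> when #address-cells and
 * #size-cells are both 2, so cell 3 is the low 32 bits of the size.
 */
static unsigned int memory_reg_size_cell(const unsigned int *regs)
{
        return regs ? regs[3] : 0;
}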