Diffstat:
 arch/powerpc/kernel/module.c                    |   6
 arch/powerpc/kernel/process.c                   |  83
 arch/powerpc/kernel/setup_32.c                  |   4
 arch/powerpc/kernel/setup_64.c                  |   2
 arch/powerpc/kernel/signal.h                    |  10
 arch/powerpc/kernel/signal_32.c                 | 109
 arch/powerpc/kernel/signal_64.c                 |  43
 arch/powerpc/kernel/vdso.c                      |  10
 arch/powerpc/kernel/vdso32/vdso32.lds.S         |   3
 arch/powerpc/kernel/vdso64/vdso64.lds.S         |   3
 arch/powerpc/kernel/vmlinux.lds.S               |   6
 arch/powerpc/lib/feature-fixups-test.S          |  21
 arch/powerpc/lib/feature-fixups.c               |  36
 arch/powerpc/mm/mem.c                           |   1
 arch/powerpc/mm/numa.c                          | 310
 arch/powerpc/platforms/pseries/hotplug-memory.c | 117
 arch/powerpc/platforms/pseries/reconfig.c       |  38
 include/asm-powerpc/code-patching.h             |   3
 include/asm-powerpc/cputable.h                  |  21
 include/asm-powerpc/elf.h                       |  20
 include/asm-powerpc/feature-fixups.h            |  10
 include/asm-powerpc/pSeries_reconfig.h          |   6
 include/asm-powerpc/processor.h                 |   2
 include/asm-powerpc/sparsemem.h                 |   4
 include/asm-powerpc/synch.h                     |  38
 25 files changed, 602 insertions(+), 304 deletions(-)
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 40dd52d81c18..af07003573c4 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -86,6 +86,12 @@ int module_finalize(const Elf_Ehdr *hdr,
 		  (void *)sect->sh_addr + sect->sh_size);
 #endif
 
+	sect = find_section(hdr, sechdrs, "__lwsync_fixup");
+	if (sect != NULL)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 (void *)sect->sh_addr,
+				 (void *)sect->sh_addr + sect->sh_size);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1924b57bd241..85e557300d86 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -105,29 +105,6 @@ void enable_kernel_fp(void)
 }
 EXPORT_SYMBOL(enable_kernel_fp);
 
-int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
-{
-#ifdef CONFIG_VSX
-	int i;
-	elf_fpreg_t *reg;
-#endif
-
-	if (!tsk->thread.regs)
-		return 0;
-	flush_fp_to_thread(current);
-
-#ifdef CONFIG_VSX
-	reg = (elf_fpreg_t *)fpregs;
-	for (i = 0; i < ELF_NFPREG - 1; i++, reg++)
-		*reg = tsk->thread.TS_FPR(i);
-	memcpy(reg, &tsk->thread.fpscr, sizeof(elf_fpreg_t));
-#else
-	memcpy(fpregs, &tsk->thread.TS_FPR(0), sizeof(*fpregs));
-#endif
-
-	return 1;
-}
-
 #ifdef CONFIG_ALTIVEC
 void enable_kernel_altivec(void)
 {
@@ -161,35 +138,6 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
-
-int dump_task_altivec(struct task_struct *tsk, elf_vrregset_t *vrregs)
-{
-	/* ELF_NVRREG includes the VSCR and VRSAVE which we need to save
-	 * separately, see below */
-	const int nregs = ELF_NVRREG - 2;
-	elf_vrreg_t *reg;
-	u32 *dest;
-
-	if (tsk == current)
-		flush_altivec_to_thread(tsk);
-
-	reg = (elf_vrreg_t *)vrregs;
-
-	/* copy the 32 vr registers */
-	memcpy(reg, &tsk->thread.vr[0], nregs * sizeof(*reg));
-	reg += nregs;
-
-	/* copy the vscr */
-	memcpy(reg, &tsk->thread.vscr, sizeof(*reg));
-	reg++;
-
-	/* vrsave is stored in the high 32bit slot of the final 128bits */
-	memset(reg, 0, sizeof(*reg));
-	dest = (u32 *)reg;
-	*dest = tsk->thread.vrsave;
-
-	return 1;
-}
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -224,29 +172,6 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
-
-/*
- * This dumps the lower half 64bits of the first 32 VSX registers.
- * This needs to be called with dump_task_fp and dump_task_altivec to
- * get all the VSX state.
- */
-int dump_task_vsx(struct task_struct *tsk, elf_vrreg_t *vrregs)
-{
-	elf_vrreg_t *reg;
-	double buf[32];
-	int i;
-
-	if (tsk == current)
-		flush_vsx_to_thread(tsk);
-
-	reg = (elf_vrreg_t *)vrregs;
-
-	for (i = 0; i < 32 ; i++)
-		buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
-	memcpy(reg, buf, sizeof(buf));
-
-	return 1;
-}
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
@@ -279,14 +204,6 @@ void flush_spe_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
-
-int dump_spe(struct pt_regs *regs, elf_vrregset_t *evrregs)
-{
-	flush_spe_to_thread(current);
-	/* We copy u32 evr[32] + u64 acc + u32 spefscr -> 35 */
-	memcpy(evrregs, &current->thread.evr[0], sizeof(u32) * 35);
-	return 1;
-}
 #endif /* CONFIG_SPE */
 
 #ifndef CONFIG_SMP
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9e83add54290..0109e7f0ccf9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -101,6 +101,10 @@ unsigned long __init early_init(unsigned long dt_ptr)
 			  PTRRELOC(&__start___ftr_fixup),
 			  PTRRELOC(&__stop___ftr_fixup));
 
+	do_lwsync_fixups(spec->cpu_features,
+			 PTRRELOC(&__start___lwsync_fixup),
+			 PTRRELOC(&__stop___lwsync_fixup));
+
 	return KERNELBASE + offset;
 }
 
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 098fd96a394a..04d8de9f0fc6 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -363,6 +363,8 @@ void __init setup_system(void)
 			  &__start___ftr_fixup, &__stop___ftr_fixup);
 	do_feature_fixups(powerpc_firmware_features,
 			  &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup);
+	do_lwsync_fixups(cur_cpu_spec->cpu_features,
+			 &__start___lwsync_fixup, &__stop___lwsync_fixup);
 
 	/*
 	 * Unflatten the device-tree passed by prom_init or kexec
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 77efb3d5465a..28f4b9f5fe5e 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -24,6 +24,16 @@ extern int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
 			     siginfo_t *info, sigset_t *oldset,
 			     struct pt_regs *regs);
 
+extern unsigned long copy_fpr_to_user(void __user *to,
+				      struct task_struct *task);
+extern unsigned long copy_fpr_from_user(struct task_struct *task,
+					void __user *from);
+#ifdef CONFIG_VSX
+extern unsigned long copy_vsx_to_user(void __user *to,
+				      struct task_struct *task);
+extern unsigned long copy_vsx_from_user(struct task_struct *task,
+					void __user *from);
+#endif
 
 #ifdef CONFIG_PPC64
 
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 349d3487d920..9991e2a58bf4 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -328,6 +328,75 @@ struct rt_sigframe {
 	int	abigap[56];
 };
 
+#ifdef CONFIG_VSX
+unsigned long copy_fpr_to_user(void __user *to,
+			       struct task_struct *task)
+{
+	double buf[ELF_NFPREG];
+	int i;
+
+	/* save FPR copy to local buffer then write to userspace */
+	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+		buf[i] = task->thread.TS_FPR(i);
+	memcpy(&buf[i], &task->thread.fpscr, sizeof(double));
+	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
+}
+
+unsigned long copy_fpr_from_user(struct task_struct *task,
+				 void __user *from)
+{
+	double buf[ELF_NFPREG];
+	int i;
+
+	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
+		return 1;
+	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
+		task->thread.TS_FPR(i) = buf[i];
+	memcpy(&task->thread.fpscr, &buf[i], sizeof(double));
+
+	return 0;
+}
+
+unsigned long copy_vsx_to_user(void __user *to,
+			       struct task_struct *task)
+{
+	double buf[ELF_NVSRHALFREG];
+	int i;
+
+	/* save VSX low-half copy to local buffer then write to userspace */
+	for (i = 0; i < ELF_NVSRHALFREG; i++)
+		buf[i] = task->thread.fpr[i][TS_VSRLOWOFFSET];
+	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
+}
+
+unsigned long copy_vsx_from_user(struct task_struct *task,
+				 void __user *from)
+{
+	double buf[ELF_NVSRHALFREG];
+	int i;
+
+	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
+		return 1;
+	for (i = 0; i < ELF_NVSRHALFREG ; i++)
+		task->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
+	return 0;
+}
+#else
+inline unsigned long copy_fpr_to_user(void __user *to,
+				      struct task_struct *task)
+{
+	return __copy_to_user(to, task->thread.fpr,
+			      ELF_NFPREG * sizeof(double));
+}
+
+inline unsigned long copy_fpr_from_user(struct task_struct *task,
+					void __user *from)
+{
+	return __copy_from_user(task->thread.fpr, from,
+				ELF_NFPREG * sizeof(double));
+}
+#endif
+
 /*
  * Save the current user registers on the user stack.
  * We only save the altivec/spe registers if the process has used
@@ -337,10 +406,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 			  int sigret)
 {
 	unsigned long msr = regs->msr;
-#ifdef CONFIG_VSX
-	double buf[32];
-	int i;
-#endif
 
 	/* Make sure floating point registers are stored in regs */
 	flush_fp_to_thread(current);
@@ -370,14 +435,9 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
 		return 1;
 #endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
-	/* save FPR copy to local buffer then write to the thread_struct */
-	flush_fp_to_thread(current);
-	for (i = 0; i < 32 ; i++)
-		buf[i] = current->thread.TS_FPR(i);
-	memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
-	if (__copy_to_user(&frame->mc_fregs, buf, ELF_NFPREG * sizeof(double)))
+	if (copy_fpr_to_user(&frame->mc_fregs, current))
 		return 1;
+#ifdef CONFIG_VSX
 	/*
 	 * Copy VSR 0-31 upper half from thread_struct to local
 	 * buffer, then write that to userspace. Also set MSR_VSX in
@@ -386,18 +446,10 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
 	 */
 	if (current->thread.used_vsr) {
 		flush_vsx_to_thread(current);
-		for (i = 0; i < 32 ; i++)
-			buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
-		if (__copy_to_user(&frame->mc_vsregs, buf,
-				   ELF_NVSRHALFREG * sizeof(double)))
+		if (copy_vsx_to_user(&frame->mc_vsregs, current))
 			return 1;
 		msr |= MSR_VSX;
 	}
-#else
-	/* save floating-point registers */
-	if (__copy_to_user(&frame->mc_fregs, current->thread.fpr,
-			   ELF_NFPREG * sizeof(double)))
-		return 1;
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_SPE
 	/* save spe registers */
@@ -442,7 +494,6 @@ static long restore_user_regs(struct pt_regs *regs,
 	unsigned int save_r2 = 0;
 	unsigned long msr;
 #ifdef CONFIG_VSX
-	double buf[32];
 	int i;
 #endif
 
@@ -490,13 +541,10 @@ static long restore_user_regs(struct pt_regs *regs,
 	if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
 		return 1;
 #endif /* CONFIG_ALTIVEC */
+	if (copy_fpr_from_user(current, &sr->mc_fregs))
+		return 1;
 
 #ifdef CONFIG_VSX
-	if (__copy_from_user(buf, &sr->mc_fregs, sizeof(sr->mc_fregs)))
-		return 1;
-	for (i = 0; i < 32 ; i++)
-		current->thread.TS_FPR(i) = buf[i];
-	memcpy(&current->thread.fpscr, &buf[i], sizeof(double));
 	/*
 	 * Force the process to reload the VSX registers from
 	 * current->thread when it next does VSX instruction.
@@ -507,18 +555,11 @@ static long restore_user_regs(struct pt_regs *regs,
 	 * Restore altivec registers from the stack to a local
 	 * buffer, then write this out to the thread_struct
 	 */
-		if (__copy_from_user(buf, &sr->mc_vsregs,
-				     sizeof(sr->mc_vsregs)))
+		if (copy_vsx_from_user(current, &sr->mc_vsregs))
 			return 1;
-		for (i = 0; i < 32 ; i++)
-			current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 	} else if (current->thread.used_vsr)
 		for (i = 0; i < 32 ; i++)
 			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
-#else
-	if (__copy_from_user(current->thread.fpr, &sr->mc_fregs,
-			     sizeof(sr->mc_fregs)))
-		return 1;
 #endif /* CONFIG_VSX */
 	/*
 	 * force the process to reload the FP registers from
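
The copy_fpr_*/copy_vsx_* helpers above fix a single on-stack layout for the FP state whether or not CONFIG_VSX is built in: 32 FPR slots followed by fpscr in the last of the ELF_NFPREG (33) doubles, plus a separate block of ELF_NVSRHALFREG (32) doubles holding the low halves of VSR0-31 that is only meaningful when MSR_VSX is set in the frame's MSR. A minimal userspace model of that layout, with the constants hardcoded here rather than taken from the kernel headers, so treat it as an illustrative sketch:

#include <stdio.h>
#include <string.h>

#define NFPREG      33   /* 32 FPRs + fpscr, mirrors ELF_NFPREG */
#define NVSRHALFREG 32   /* low 64 bits of VSR0-31, mirrors ELF_NVSRHALFREG */

/* model of the frame area written by copy_fpr_to_user()/copy_vsx_to_user() */
struct fp_frame {
	double fregs[NFPREG];        /* mc_fregs: FPR0-31, then fpscr */
	double vsregs[NVSRHALFREG];  /* mc_vsregs: valid only if MSR_VSX set */
};

int main(void)
{
	double fpr[32] = { [5] = 42.0 };  /* stand-in for thread.TS_FPR(i) */
	double fpscr = 0.0;
	struct fp_frame frame;
	int i;

	/* the pack step performed by copy_fpr_to_user() */
	for (i = 0; i < NFPREG - 1; i++)
		frame.fregs[i] = fpr[i];
	memcpy(&frame.fregs[i], &fpscr, sizeof(double));

	/* the unpack step performed by copy_fpr_from_user() */
	for (i = 0; i < NFPREG - 1; i++)
		fpr[i] = frame.fregs[i];

	printf("FPR5 round-tripped: %g\n", fpr[5]);
	return 0;
}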
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 8214e57aab67..93ebfb6944b6 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -89,10 +89,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 #endif
 	unsigned long msr = regs->msr;
 	long err = 0;
-#ifdef CONFIG_VSX
-	double buf[FP_REGS_SIZE];
-	int i;
-#endif
 
 	flush_fp_to_thread(current);
 
@@ -117,12 +113,9 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 		err |= __put_user(0, &sc->v_regs);
 #endif /* CONFIG_ALTIVEC */
 	flush_fp_to_thread(current);
+	/* copy fpr regs and fpscr */
+	err |= copy_fpr_to_user(&sc->fp_regs, current);
 #ifdef CONFIG_VSX
-	/* Copy FP to local buffer then write that out */
-	for (i = 0; i < 32 ; i++)
-		buf[i] = current->thread.TS_FPR(i);
-	memcpy(&buf[i], &current->thread.fpscr, sizeof(double));
-	err |= __copy_to_user(&sc->fp_regs, buf, FP_REGS_SIZE);
 	/*
 	 * Copy VSX low doubleword to local buffer for formatting,
 	 * then out to userspace. Update v_regs to point after the
@@ -131,17 +124,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
 	if (current->thread.used_vsr) {
 		flush_vsx_to_thread(current);
 		v_regs += ELF_NVRREG;
-		for (i = 0; i < 32 ; i++)
-			buf[i] = current->thread.fpr[i][TS_VSRLOWOFFSET];
-		err |= __copy_to_user(v_regs, buf, 32 * sizeof(double));
+		err |= copy_vsx_to_user(v_regs, current);
 		/* set MSR_VSX in the MSR value in the frame to
 		 * indicate that sc->vs_regs contains valid data.
 		 */
 		msr |= MSR_VSX;
 	}
-#else /* CONFIG_VSX */
-	/* copy fpr regs and fpscr */
-	err |= __copy_to_user(&sc->fp_regs, &current->thread.fpr, FP_REGS_SIZE);
 #endif /* CONFIG_VSX */
 	err |= __put_user(&sc->gp_regs, &sc->regs);
 	WARN_ON(!FULL_REGS(regs));
@@ -165,13 +153,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 #ifdef CONFIG_ALTIVEC
 	elf_vrreg_t __user *v_regs;
 #endif
-#ifdef CONFIG_VSX
-	double buf[FP_REGS_SIZE];
-	int i;
-#endif
 	unsigned long err = 0;
 	unsigned long save_r13 = 0;
 	unsigned long msr;
+#ifdef CONFIG_VSX
+	int i;
+#endif
 
 	/* If this is not a signal return, we preserve the TLS in r13 */
 	if (!sig)
@@ -234,15 +221,9 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	else
 		current->thread.vrsave = 0;
 #endif /* CONFIG_ALTIVEC */
-#ifdef CONFIG_VSX
 	/* restore floating point */
-	err |= __copy_from_user(buf, &sc->fp_regs, FP_REGS_SIZE);
-	if (err)
-		return err;
-	for (i = 0; i < 32 ; i++)
-		current->thread.TS_FPR(i) = buf[i];
-	memcpy(&current->thread.fpscr, &buf[i], sizeof(double));
-
+	err |= copy_fpr_from_user(current, &sc->fp_regs);
+#ifdef CONFIG_VSX
 	/*
 	 * Get additional VSX data. Update v_regs to point after the
 	 * VMX data. Copy VSX low doubleword from userspace to local
@@ -250,14 +231,12 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	 */
 	v_regs += ELF_NVRREG;
 	if ((msr & MSR_VSX) != 0)
-		err |= __copy_from_user(buf, v_regs, 32 * sizeof(double));
+		err |= copy_vsx_from_user(current, v_regs);
 	else
-		memset(buf, 0, 32 * sizeof(double));
+		for (i = 0; i < 32 ; i++)
+			current->thread.fpr[i][TS_VSRLOWOFFSET] = 0;
 
-	for (i = 0; i < 32 ; i++)
-		current->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
 #else
-	err |= __copy_from_user(&current->thread.fpr, &sc->fp_regs, FP_REGS_SIZE);
 #endif
 	return err;
 }
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index ce245a850db2..f177c60ea766 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -571,6 +571,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 	if (start64)
 		do_feature_fixups(powerpc_firmware_features,
 				  start64, start64 + size64);
+
+	start64 = find_section64(v64->hdr, "__lwsync_fixup", &size64);
+	if (start64)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start64, start64 + size64);
 #endif /* CONFIG_PPC64 */
 
 	start32 = find_section32(v32->hdr, "__ftr_fixup", &size32);
@@ -585,6 +590,11 @@ static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
 				  start32, start32 + size32);
 #endif /* CONFIG_PPC64 */
 
+	start32 = find_section32(v32->hdr, "__lwsync_fixup", &size32);
+	if (start32)
+		do_lwsync_fixups(cur_cpu_spec->cpu_features,
+				 start32, start32 + size32);
+
 	return 0;
 }
 
diff --git a/arch/powerpc/kernel/vdso32/vdso32.lds.S b/arch/powerpc/kernel/vdso32/vdso32.lds.S
index 271793577cdc..be3b6a41dc09 100644
--- a/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -33,6 +33,9 @@ SECTIONS
 	. = ALIGN(8);
 	__ftr_fixup	: { *(__ftr_fixup) }
 
+	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
diff --git a/arch/powerpc/kernel/vdso64/vdso64.lds.S b/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e608d1bd3bff..d0b2526dd38d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -35,6 +35,9 @@ SECTIONS
 	__ftr_fixup	: { *(__ftr_fixup) }
 
 	. = ALIGN(8);
+	__lwsync_fixup	: { *(__lwsync_fixup) }
+
+	. = ALIGN(8);
 	__fw_ftr_fixup	: { *(__fw_ftr_fixup) }
 
 	/*
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 3c07811989fc..6856f6c15727 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -127,6 +127,12 @@ SECTIONS
 		*(__ftr_fixup)
 		__stop___ftr_fixup = .;
 	}
+	. = ALIGN(8);
+	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
+		__start___lwsync_fixup = .;
+		*(__lwsync_fixup)
+		__stop___lwsync_fixup = .;
+	}
 #ifdef CONFIG_PPC64
 	. = ALIGN(8);
 	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S
index 10d038b501a6..cb737484c5aa 100644
--- a/arch/powerpc/lib/feature-fixups-test.S
+++ b/arch/powerpc/lib/feature-fixups-test.S
@@ -10,6 +10,7 @@
 
 #include <asm/feature-fixups.h>
 #include <asm/ppc_asm.h>
+#include <asm/synch.h>
 
 	.text
 
@@ -139,14 +140,14 @@ globl(ftr_fixup_test6)
 1:	or	1,1,1
 BEGIN_FTR_SECTION
 	or	5,5,5
-2:	cmpdi	r3,0
+2:	PPC_LCMPI	r3,0
 	beq	4f
 	blt	2b
 	b	1b
 	b	4f
 FTR_SECTION_ELSE
 2:	or	2,2,2
-	cmpdi	r3,1
+	PPC_LCMPI	r3,1
 	beq	3f
 	blt	2b
 	b	3f
@@ -161,7 +162,7 @@ globl(end_ftr_fixup_test6)
 globl(ftr_fixup_test6_expected)
 1:	or	1,1,1
 2:	or	2,2,2
-	cmpdi	r3,1
+	PPC_LCMPI	r3,1
 	beq	3f
 	blt	2b
 	b	3f
@@ -725,3 +726,17 @@ MAKE_MACRO_TEST_EXPECTED(FTR);
 MAKE_MACRO_TEST(FW_FTR);
 MAKE_MACRO_TEST_EXPECTED(FW_FTR);
 #endif
+
+globl(lwsync_fixup_test)
+1:	or	1,1,1
+	LWSYNC
+globl(end_lwsync_fixup_test)
+
+globl(lwsync_fixup_test_expected_LWSYNC)
+1:	or	1,1,1
+	lwsync
+
+globl(lwsync_fixup_test_expected_SYNC)
+1:	or	1,1,1
+	sync
+
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 48e1ed89052d..4e43702b9813 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -110,6 +110,22 @@ void do_feature_fixups(unsigned long value, void *fixup_start, void *fixup_end)
 	}
 }
 
+void do_lwsync_fixups(unsigned long value, void *fixup_start, void *fixup_end)
+{
+	unsigned int *start, *end, *dest;
+
+	if (!(value & CPU_FTR_LWSYNC))
+		return;
+
+	start = fixup_start;
+	end = fixup_end;
+
+	for (; start < end; start++) {
+		dest = (void *)start + *start;
+		patch_instruction(dest, PPC_LWSYNC_INSTR);
+	}
+}
+
 #ifdef CONFIG_FTR_FIXUP_SELFTEST
 
 #define check(x)	\
@@ -295,6 +311,25 @@ static void test_fw_macros(void)
 #endif
 }
 
+static void test_lwsync_macros(void)
+{
+	extern void lwsync_fixup_test;
+	extern void end_lwsync_fixup_test;
+	extern void lwsync_fixup_test_expected_LWSYNC;
+	extern void lwsync_fixup_test_expected_SYNC;
+	unsigned long size = &end_lwsync_fixup_test -
+			     &lwsync_fixup_test;
+
+	/* The fixups have already been done for us during boot */
+	if (cur_cpu_spec->cpu_features & CPU_FTR_LWSYNC) {
+		check(memcmp(&lwsync_fixup_test,
+			     &lwsync_fixup_test_expected_LWSYNC, size) == 0);
+	} else {
+		check(memcmp(&lwsync_fixup_test,
+			     &lwsync_fixup_test_expected_SYNC, size) == 0);
+	}
+}
+
 static int __init test_feature_fixups(void)
 {
 	printk(KERN_DEBUG "Running feature fixup self-tests ...\n");
@@ -307,6 +342,7 @@ static int __init test_feature_fixups(void)
 	test_alternative_case_with_external_branch();
 	test_cpu_macros();
 	test_fw_macros();
+	test_lwsync_macros();
 
 	return 0;
 }
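
What makes do_lwsync_fixups() work is the entry format emitted by MAKE_LWSYNC_SECTION_ENTRY in feature-fixups.h further down: each __lwsync_fixup entry is a 32-bit word holding ".long label1b-label3b", i.e. the (negative) distance from the entry itself back to the LWSYNC site, so the patch address is simply the entry's own address plus its stored value. A self-contained host-side model of that walk; the two instruction encodings are real, while the two-word buffer standing in for kernel text plus fixup section is invented for the demonstration:

#include <stdio.h>
#include <stdint.h>

#define PPC_INST_SYNC   0x7c0004acu  /* sync   */
#define PPC_INST_LWSYNC 0x7c2004acu  /* lwsync, i.e. PPC_LWSYNC_INSTR */

int main(void)
{
	/* code[0] is the patch site, code[1] the fixup table entry */
	uint32_t code[2] = { PPC_INST_SYNC, 0 };

	/* entry value = site - entry, as .long label1b-label3b computes */
	code[1] = (uint32_t)((uintptr_t)&code[0] - (uintptr_t)&code[1]);

	/* the walk in do_lwsync_fixups(): dest = (void *)start + *start */
	uint32_t *start = &code[1];
	uint32_t *dest = (uint32_t *)((uintptr_t)start + (int32_t)code[1]);

	*dest = PPC_INST_LWSYNC;  /* stand-in for patch_instruction() */
	printf("patch site now holds %#x\n", code[0]);
	return 0;
}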
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 51f82d83bf14..776ba6ad5e1e 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -44,6 +44,7 @@
 #include <asm/btext.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
+#include <asm/sparsemem.h>
 #include <asm/vdso.h>
 #include <asm/fixmap.h>
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index dc704da363eb..cf4bffba6f7c 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -268,6 +268,144 @@ static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
 	return result;
 }
 
+struct of_drconf_cell {
+	u64	base_addr;
+	u32	drc_index;
+	u32	reserved;
+	u32	aa_index;
+	u32	flags;
+};
+
+#define DRCONF_MEM_ASSIGNED	0x00000008
+#define DRCONF_MEM_AI_INVALID	0x00000040
+#define DRCONF_MEM_RESERVED	0x00000080
+
+/*
+ * Read the next lmb list entry from the ibm,dynamic-memory property
+ * and return the information in the provided of_drconf_cell structure.
+ */
+static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
+{
+	const u32 *cp;
+
+	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);
+
+	cp = *cellp;
+	drmem->drc_index = cp[0];
+	drmem->reserved = cp[1];
+	drmem->aa_index = cp[2];
+	drmem->flags = cp[3];
+
+	*cellp = cp + 4;
+}
+
+/*
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
+ *
+ * The layout of the ibm,dynamic-memory property is a count N of lmb
+ * list entries followed by the N entries themselves.  Each lmb list
+ * entry contains information as laid out in the of_drconf_cell
+ * struct above.
+ */
+static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
+{
+	const u32 *prop;
+	u32 len, entries;
+
+	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
+	if (!prop || len < sizeof(unsigned int))
+		return 0;
+
+	entries = *prop++;
+
+	/* Now that we know the number of entries, revalidate the size
+	 * of the property read in to ensure we have everything
+	 */
+	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
+		return 0;
+
+	*dm = prop;
+	return entries;
+}
+
+/*
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
+ * from the device tree.
+ */
+static u64 of_get_lmb_size(struct device_node *memory)
+{
+	const u32 *prop;
+	u32 len;
+
+	prop = of_get_property(memory, "ibm,lmb-size", &len);
+	if (!prop || len < sizeof(unsigned int))
+		return 0;
+
+	return read_n_cells(n_mem_size_cells, &prop);
+}
+
+struct assoc_arrays {
+	u32	n_arrays;
+	u32	array_sz;
+	const u32 *arrays;
+};
+
+/*
+ * Retrieve and validate the list of associativity arrays for drconf
+ * memory from the ibm,associativity-lookup-arrays property of the
+ * device tree.
+ *
+ * The layout of the ibm,associativity-lookup-arrays property is a number N
+ * indicating the number of associativity arrays, followed by a number M
+ * indicating the size of each associativity array, followed by a list
+ * of N associativity arrays.
+ */
+static int of_get_assoc_arrays(struct device_node *memory,
+			       struct assoc_arrays *aa)
+{
+	const u32 *prop;
+	u32 len;
+
+	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
+	if (!prop || len < 2 * sizeof(unsigned int))
+		return -1;
+
+	aa->n_arrays = *prop++;
+	aa->array_sz = *prop++;
+
+	/* Now that we know the number of arrays and size of each array,
+	 * revalidate the size of the property read in.
+	 */
+	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
+		return -1;
+
+	aa->arrays = prop;
+	return 0;
+}
+
+/*
+ * This is like of_node_to_nid_single() for memory represented in the
+ * ibm,dynamic-reconfiguration-memory node.
+ */
+static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
+				   struct assoc_arrays *aa)
+{
+	int default_nid = 0;
+	int nid = default_nid;
+	int index;
+
+	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
+	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
+	    drmem->aa_index < aa->n_arrays) {
+		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
+		nid = aa->arrays[index];
+
+		if (nid == 0xffff || nid >= MAX_NUMNODES)
+			nid = default_nid;
+	}
+
+	return nid;
+}
+
 /*
  * Figure out to which domain a cpu belongs and stick it there.
  * Return the id of the domain used.
@@ -355,57 +493,50 @@ static unsigned long __init numa_enforce_memory_limit(unsigned long start,
  */
 static void __init parse_drconf_memory(struct device_node *memory)
 {
-	const unsigned int *lm, *dm, *aa;
-	unsigned int ls, ld, la;
-	unsigned int n, aam, aalen;
-	unsigned long lmb_size, size, start;
-	int nid, default_nid = 0;
-	unsigned int ai, flags;
-
-	lm = of_get_property(memory, "ibm,lmb-size", &ls);
-	dm = of_get_property(memory, "ibm,dynamic-memory", &ld);
-	aa = of_get_property(memory, "ibm,associativity-lookup-arrays", &la);
-	if (!lm || !dm || !aa ||
-	    ls < sizeof(unsigned int) || ld < sizeof(unsigned int) ||
-	    la < 2 * sizeof(unsigned int))
+	const u32 *dm;
+	unsigned int n, rc;
+	unsigned long lmb_size, size;
+	int nid;
+	struct assoc_arrays aa;
+
+	n = of_get_drconf_memory(memory, &dm);
+	if (!n)
+		return;
+
+	lmb_size = of_get_lmb_size(memory);
+	if (!lmb_size)
 		return;
 
-	lmb_size = read_n_cells(n_mem_size_cells, &lm);
-	n = *dm++;		/* number of LMBs */
-	aam = *aa++;		/* number of associativity lists */
-	aalen = *aa++;		/* length of each associativity list */
-	if (ld < (n * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int) ||
-	    la < (aam * aalen + 2) * sizeof(unsigned int))
+	rc = of_get_assoc_arrays(memory, &aa);
+	if (rc)
 		return;
 
 	for (; n != 0; --n) {
-		start = read_n_cells(n_mem_addr_cells, &dm);
-		ai = dm[2];
-		flags = dm[3];
-		dm += 4;
-		/* 0x80 == reserved, 0x8 = assigned to us */
-		if ((flags & 0x80) || !(flags & 0x8))
+		struct of_drconf_cell drmem;
+
+		read_drconf_cell(&drmem, &dm);
+
+		/* skip this block if the reserved bit is set in flags (0x80)
+		   or if the block is not assigned to this partition (0x8) */
+		if ((drmem.flags & DRCONF_MEM_RESERVED)
+		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
 			continue;
-		nid = default_nid;
-		/* flags & 0x40 means associativity index is invalid */
-		if (min_common_depth > 0 && min_common_depth <= aalen &&
-		    (flags & 0x40) == 0 && ai < aam) {
-			/* this is like of_node_to_nid_single */
-			nid = aa[ai * aalen + min_common_depth - 1];
-			if (nid == 0xffff || nid >= MAX_NUMNODES)
-				nid = default_nid;
-		}
 
-		fake_numa_create_new_node(((start + lmb_size) >> PAGE_SHIFT),
-					  &nid);
+		nid = of_drconf_to_nid_single(&drmem, &aa);
+
+		fake_numa_create_new_node(
+				((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
+				&nid);
+
 		node_set_online(nid);
 
-		size = numa_enforce_memory_limit(start, lmb_size);
+		size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
 		if (!size)
 			continue;
 
-		add_active_range(nid, start >> PAGE_SHIFT,
-				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));
+		add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
+				 (drmem.base_addr >> PAGE_SHIFT)
+				 + (size >> PAGE_SHIFT));
 	}
 }
 
@@ -770,6 +901,79 @@ early_param("numa", early_numa);
 
 #ifdef CONFIG_MEMORY_HOTPLUG
 /*
+ * Validate the node associated with the memory section we are
+ * trying to add.
+ */
+int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
+		      unsigned long scn_addr)
+{
+	nodemask_t nodes;
+
+	if (*nid < 0 || !node_online(*nid))
+		*nid = any_online_node(NODE_MASK_ALL);
+
+	if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
+		nodes_setall(nodes);
+		while (NODE_DATA(*nid)->node_spanned_pages == 0) {
+			node_clear(*nid, nodes);
+			*nid = any_online_node(nodes);
+		}
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Find the node associated with a hot added memory section represented
+ * by the ibm,dynamic-reconfiguration-memory node.
+ */
+static int hot_add_drconf_scn_to_nid(struct device_node *memory,
+				     unsigned long scn_addr)
+{
+	const u32 *dm;
+	unsigned int n, rc;
+	unsigned long lmb_size;
+	int default_nid = any_online_node(NODE_MASK_ALL);
+	int nid;
+	struct assoc_arrays aa;
+
+	n = of_get_drconf_memory(memory, &dm);
+	if (!n)
+		return default_nid;
+
+	lmb_size = of_get_lmb_size(memory);
+	if (!lmb_size)
+		return default_nid;
+
+	rc = of_get_assoc_arrays(memory, &aa);
+	if (rc)
+		return default_nid;
+
+	for (; n != 0; --n) {
+		struct of_drconf_cell drmem;
+
+		read_drconf_cell(&drmem, &dm);
+
+		/* skip this block if it is reserved or not assigned to
+		 * this partition */
+		if ((drmem.flags & DRCONF_MEM_RESERVED)
+		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
+			continue;
+
+		nid = of_drconf_to_nid_single(&drmem, &aa);
+
+		if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
+				      scn_addr))
+			return nid;
+	}
+
+	BUG();	/* section address should be found above */
+	return 0;
+}
+
+/*
  * Find the node associated with a hot added memory section. Section
  * corresponds to a SPARSEMEM section, not an LMB. It is assumed that
  * sections are fully contained within a single LMB.
@@ -777,12 +981,17 @@ early_param("numa", early_numa);
 int hot_add_scn_to_nid(unsigned long scn_addr)
 {
 	struct device_node *memory = NULL;
-	nodemask_t nodes;
-	int default_nid = any_online_node(NODE_MASK_ALL);
 	int nid;
 
 	if (!numa_enabled || (min_common_depth < 0))
-		return default_nid;
+		return any_online_node(NODE_MASK_ALL);
+
+	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+	if (memory) {
+		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
+		of_node_put(memory);
+		return nid;
+	}
 
 	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
 		unsigned long start, size;
@@ -801,13 +1010,9 @@ ha_new_range:
 		size = read_n_cells(n_mem_size_cells, &memcell_buf);
 		nid = of_node_to_nid_single(memory);
 
-		/* Domains not present at boot default to 0 */
-		if (nid < 0 || !node_online(nid))
-			nid = default_nid;
-
-		if ((scn_addr >= start) && (scn_addr < (start + size))) {
+		if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
 			of_node_put(memory);
-			goto got_nid;
+			return nid;
 		}
 
 		if (--ranges)	/* process all ranges in cell */
@@ -815,14 +1020,5 @@ ha_new_range:
 	}
 	BUG();	/* section address should be found above */
 	return 0;
-
-	/* Temporary code to ensure that returned node is not empty */
-got_nid:
-	nodes_setall(nodes);
-	while (NODE_DATA(nid)->node_spanned_pages == 0) {
-		node_clear(nid, nodes);
-		nid = any_online_node(nodes);
-	}
-	return nid;
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
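
The property walked above has a simple flat encoding: one cell holding the entry count N, then N entries of n_mem_addr_cells address cells followed by four cells (drc_index, reserved, aa_index, flags). A host-side sketch of that parse, assuming n_mem_addr_cells == 2 and an invented sample property:

#include <stdio.h>
#include <stdint.h>

#define DRCONF_MEM_ASSIGNED 0x00000008
#define DRCONF_MEM_RESERVED 0x00000080

/* mirrors read_n_cells() for the two-cell case */
static uint64_t read_2_cells(const uint32_t **p)
{
	uint64_t hi = *(*p)++;
	uint64_t lo = *(*p)++;
	return (hi << 32) | lo;
}

int main(void)
{
	/* N = 2 lmbs; the second is marked reserved and must be skipped */
	static const uint32_t prop[] = {
		2,
		0x0, 0x00000000, 1000, 0, 0, DRCONF_MEM_ASSIGNED,
		0x0, 0x01000000, 1001, 0, 1, DRCONF_MEM_RESERVED,
	};
	const uint32_t *dm = prop;
	uint32_t n = *dm++;

	for (; n != 0; --n) {
		uint64_t base = read_2_cells(&dm);  /* of_drconf_cell.base_addr */
		uint32_t drc_index = dm[0];
		uint32_t aa_index = dm[2];
		uint32_t flags = dm[3];
		dm += 4;

		if ((flags & DRCONF_MEM_RESERVED) ||
		    !(flags & DRCONF_MEM_ASSIGNED))
			continue;
		printf("usable lmb at %#llx, drc %u, aa-index %u\n",
		       (unsigned long long)base, drc_index, aa_index);
	}
	return 0;
}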
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 3c5727dd5aa5..a1a368dd2d99 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -15,34 +15,13 @@
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
 
-static int pseries_remove_memory(struct device_node *np)
+static int pseries_remove_lmb(unsigned long base, unsigned int lmb_size)
 {
-	const char *type;
-	const unsigned int *my_index;
-	const unsigned int *regs;
-	u64 start_pfn, start;
+	unsigned long start, start_pfn;
 	struct zone *zone;
-	int ret = -EINVAL;
-
-	/*
-	 * Check to see if we are actually removing memory
-	 */
-	type = of_get_property(np, "device_type", NULL);
-	if (type == NULL || strcmp(type, "memory") != 0)
-		return 0;
+	int ret;
 
-	/*
-	 * Find the memory index and size of the removing section
-	 */
-	my_index = of_get_property(np, "ibm,my-drc-index", NULL);
-	if (!my_index)
-		return ret;
-
-	regs = of_get_property(np, "reg", NULL);
-	if (!regs)
-		return ret;
-
-	start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+	start_pfn = base >> PFN_SECTION_SHIFT;
 	zone = page_zone(pfn_to_page(start_pfn));
 
 	/*
@@ -54,56 +33,111 @@ static int pseries_remove_memory(struct device_node *np)
 	 * to sysfs "state" file and we can't remove sysfs entries
 	 * while writing to it. So we have to defer it to here.
 	 */
-	ret = __remove_pages(zone, start_pfn, regs[3] >> PAGE_SHIFT);
+	ret = __remove_pages(zone, start_pfn, lmb_size >> PAGE_SHIFT);
 	if (ret)
 		return ret;
 
 	/*
 	 * Update memory regions for memory remove
 	 */
-	lmb_remove(start_pfn << PAGE_SHIFT, regs[3]);
+	lmb_remove(base, lmb_size);
 
 	/*
 	 * Remove htab bolted mappings for this section of memory
 	 */
-	start = (unsigned long)__va(start_pfn << PAGE_SHIFT);
-	ret = remove_section_mapping(start, start + regs[3]);
+	start = (unsigned long)__va(base);
+	ret = remove_section_mapping(start, start + lmb_size);
 	return ret;
 }
 
-static int pseries_add_memory(struct device_node *np)
+static int pseries_remove_memory(struct device_node *np)
 {
 	const char *type;
-	const unsigned int *my_index;
 	const unsigned int *regs;
-	u64 start_pfn;
+	unsigned long base;
+	unsigned int lmb_size;
 	int ret = -EINVAL;
 
 	/*
-	 * Check to see if we are actually adding memory
+	 * Check to see if we are actually removing memory
 	 */
 	type = of_get_property(np, "device_type", NULL);
 	if (type == NULL || strcmp(type, "memory") != 0)
 		return 0;
 
 	/*
-	 * Find the memory index and size of the added section
+	 * Find the base address and size of the lmb
 	 */
-	my_index = of_get_property(np, "ibm,my-drc-index", NULL);
-	if (!my_index)
+	regs = of_get_property(np, "reg", NULL);
+	if (!regs)
 		return ret;
 
+	base = *(unsigned long *)regs;
+	lmb_size = regs[3];
+
+	ret = pseries_remove_lmb(base, lmb_size);
+	return ret;
+}
+
+static int pseries_add_memory(struct device_node *np)
+{
+	const char *type;
+	const unsigned int *regs;
+	unsigned long base;
+	unsigned int lmb_size;
+	int ret = -EINVAL;
+
+	/*
+	 * Check to see if we are actually adding memory
+	 */
+	type = of_get_property(np, "device_type", NULL);
+	if (type == NULL || strcmp(type, "memory") != 0)
+		return 0;
+
+	/*
+	 * Find the base and size of the lmb
+	 */
 	regs = of_get_property(np, "reg", NULL);
 	if (!regs)
 		return ret;
 
-	start_pfn = section_nr_to_pfn(*my_index & 0xffff);
+	base = *(unsigned long *)regs;
+	lmb_size = regs[3];
 
 	/*
 	 * Update memory region to represent the memory add
 	 */
-	lmb_add(start_pfn << PAGE_SHIFT, regs[3]);
-	return 0;
+	ret = lmb_add(base, lmb_size);
+	return (ret < 0) ? -EINVAL : 0;
+}
+
+static int pseries_drconf_memory(unsigned long *base, unsigned int action)
+{
+	struct device_node *np;
+	const unsigned long *lmb_size;
+	int rc;
+
+	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+	if (!np)
+		return -EINVAL;
+
+	lmb_size = of_get_property(np, "ibm,lmb-size", NULL);
+	if (!lmb_size) {
+		of_node_put(np);
+		return -EINVAL;
+	}
+
+	if (action == PSERIES_DRCONF_MEM_ADD) {
+		rc = lmb_add(*base, *lmb_size);
+		rc = (rc < 0) ? -EINVAL : 0;
+	} else if (action == PSERIES_DRCONF_MEM_REMOVE) {
+		rc = pseries_remove_lmb(*base, *lmb_size);
+	} else {
+		rc = -EINVAL;
+	}
+
+	of_node_put(np);
+	return rc;
 }
 
 static int pseries_memory_notifier(struct notifier_block *nb,
@@ -120,6 +154,11 @@ static int pseries_memory_notifier(struct notifier_block *nb,
 		if (pseries_remove_memory(node))
 			err = NOTIFY_BAD;
 		break;
+	case PSERIES_DRCONF_MEM_ADD:
+	case PSERIES_DRCONF_MEM_REMOVE:
+		if (pseries_drconf_memory(node, action))
+			err = NOTIFY_BAD;
+		break;
 	default:
 		err = NOTIFY_DONE;
 		break;
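
Unlike PSERIES_RECONFIG_ADD/REMOVE, whose payload is a device node, the two new actions carry a pointer to the lmb base address pulled out of the updated ibm,dynamic-memory property value. A userspace model of the dispatch (the action values mirror pSeries_reconfig.h; the rest is invented for illustration):

#include <stdio.h>

#define PSERIES_RECONFIG_ADD      0x0001
#define PSERIES_RECONFIG_REMOVE   0x0002
#define PSERIES_DRCONF_MEM_ADD    0x0003
#define PSERIES_DRCONF_MEM_REMOVE 0x0004

static int memory_notifier(unsigned long action, void *data)
{
	switch (action) {
	case PSERIES_RECONFIG_ADD:
	case PSERIES_RECONFIG_REMOVE:
		/* data is a struct device_node * in the kernel */
		printf("whole memory node added/removed\n");
		break;
	case PSERIES_DRCONF_MEM_ADD:
	case PSERIES_DRCONF_MEM_REMOVE:
		/* data points at the lmb base address */
		printf("drconf lmb at base %#lx\n", *(unsigned long *)data);
		break;
	default:
		return 1;  /* NOTIFY_DONE stand-in */
	}
	return 0;
}

int main(void)
{
	unsigned long base = 0x10000000UL;
	return memory_notifier(PSERIES_DRCONF_MEM_ADD, &base);
}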
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 75769aae41d5..7637bd38c795 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -365,7 +365,7 @@ static char *parse_node(char *buf, size_t bufsize, struct device_node **npp)
 	*buf = '\0';
 	buf++;
 
-	handle = simple_strtoul(handle_str, NULL, 10);
+	handle = simple_strtoul(handle_str, NULL, 0);
 
 	*npp = of_find_node_by_phandle(handle);
 	return buf;
@@ -422,8 +422,8 @@ static int do_update_property(char *buf, size_t bufsize)
 {
 	struct device_node *np;
 	unsigned char *value;
-	char *name, *end;
-	int length;
+	char *name, *end, *next_prop;
+	int rc, length;
 	struct property *newprop, *oldprop;
 	buf = parse_node(buf, bufsize, &np);
 	end = buf + bufsize;
@@ -431,7 +431,8 @@ static int do_update_property(char *buf, size_t bufsize)
 	if (!np)
 		return -ENODEV;
 
-	if (parse_next_property(buf, end, &name, &length, &value) == NULL)
+	next_prop = parse_next_property(buf, end, &name, &length, &value);
+	if (!next_prop)
 		return -EINVAL;
 
 	newprop = new_property(name, length, value, NULL);
@@ -442,7 +443,34 @@ static int do_update_property(char *buf, size_t bufsize)
 	if (!oldprop)
 		return -ENODEV;
 
-	return prom_update_property(np, newprop, oldprop);
+	rc = prom_update_property(np, newprop, oldprop);
+	if (rc)
+		return rc;
+
+	/* For memory under the ibm,dynamic-reconfiguration-memory node
+	 * of the device tree, adding and removing memory is just an update
+	 * to the ibm,dynamic-memory property instead of adding/removing a
+	 * memory node in the device tree.  For these cases we still need to
+	 * involve the notifier chain.
+	 */
+	if (!strcmp(name, "ibm,dynamic-memory")) {
+		int action;
+
+		next_prop = parse_next_property(next_prop, end, &name,
+						&length, &value);
+		if (!next_prop)
+			return -EINVAL;
+
+		if (!strcmp(name, "add"))
+			action = PSERIES_DRCONF_MEM_ADD;
+		else
+			action = PSERIES_DRCONF_MEM_REMOVE;
+
+		blocking_notifier_call_chain(&pSeries_reconfig_chain,
+					     action, value);
+	}
+
+	return 0;
 }
 
 /**
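
The one-character parse_node() change (base 10 to base 0) matters because base-0 conversion auto-detects the radix: a phandle written as "0x1a00" used to stop parsing at the 'x' and yield 0. A plain userspace demonstration with strtoul(), which shares simple_strtoul()'s semantics:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *handle_str = "0x1a00";

	unsigned long base10 = strtoul(handle_str, NULL, 10);
	unsigned long base0 = strtoul(handle_str, NULL, 0);

	printf("base 10: %lu (conversion stops at 'x')\n", base10);
	printf("base 0:  %#lx (radix auto-detected)\n", base0);
	return 0;
}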
diff --git a/include/asm-powerpc/code-patching.h b/include/asm-powerpc/code-patching.h
index ef3a5d156dba..107d9b915e33 100644
--- a/include/asm-powerpc/code-patching.h
+++ b/include/asm-powerpc/code-patching.h
@@ -12,7 +12,8 @@
 
 #include <asm/types.h>
 
 #define PPC_NOP_INSTR	0x60000000
+#define PPC_LWSYNC_INSTR	0x7c2004ac
 
 /* Flags for create_branch:
  * "b"   == create_branch(addr, target, 0);
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 4e4491cb9d3b..3171ac904b91 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -156,6 +156,7 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 #define CPU_FTR_UNIFIED_ID_CACHE	ASM_CONST(0x0000000001000000)
 #define CPU_FTR_SPE			ASM_CONST(0x0000000002000000)
 #define CPU_FTR_NEED_PAIRED_STWCX	ASM_CONST(0x0000000004000000)
+#define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -369,43 +370,43 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
 	    CPU_FTR_NODSISRALIGN)
 #define CPU_FTRS_E500MC	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
-	    CPU_FTR_L2CSR)
+	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
-#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
-#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_RS64	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
-#define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA)
-#define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA)
-#define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR)
-#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR)
-#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | \
+#define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR)
-#define CPU_FTRS_CELL	(CPU_FTR_USE_TB | \
+#define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | CPU_FTR_CELL_TB_BUG)
-#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | \
+#define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
 	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
 	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
diff --git a/include/asm-powerpc/elf.h b/include/asm-powerpc/elf.h
index 38a51728406f..89664675b469 100644
--- a/include/asm-powerpc/elf.h
+++ b/include/asm-powerpc/elf.h
@@ -204,28 +204,8 @@ static inline void ppc_elf_core_copy_regs(elf_gregset_t elf_regs,
 }
 #define ELF_CORE_COPY_REGS(gregs, regs) ppc_elf_core_copy_regs(gregs, regs);
 
-static inline int dump_task_regs(struct task_struct *tsk,
-				 elf_gregset_t *elf_regs)
-{
-	struct pt_regs *regs = tsk->thread.regs;
-	if (regs)
-		ppc_elf_core_copy_regs(*elf_regs, regs);
-
-	return 1;
-}
-#define ELF_CORE_COPY_TASK_REGS(tsk, elf_regs) dump_task_regs(tsk, elf_regs)
-
-extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
-#define ELF_CORE_COPY_FPREGS(tsk, elf_fpregs) dump_task_fpu(tsk, elf_fpregs)
-
 typedef elf_vrregset_t elf_fpxregset_t;
 
-#ifdef CONFIG_ALTIVEC
-extern int dump_task_altivec(struct task_struct *, elf_vrregset_t *vrregs);
-#define ELF_CORE_COPY_XFPREGS(tsk, regs) dump_task_altivec(tsk, regs)
-#define ELF_CORE_XFPREG_TYPE NT_PPC_VMX
-#endif
-
 /* ELF_HWCAP yields a mask that user programs can use to figure out what
    instruction set this cpu supports. This could be done in userspace,
    but it's not easy, and we've already done it here. */
diff --git a/include/asm-powerpc/feature-fixups.h b/include/asm-powerpc/feature-fixups.h
index ab30129dced7..a1029967620b 100644
--- a/include/asm-powerpc/feature-fixups.h
+++ b/include/asm-powerpc/feature-fixups.h
@@ -113,4 +113,14 @@ label##5: \
 
 #endif /* __ASSEMBLY__ */
 
+/* LWSYNC feature sections */
+#define START_LWSYNC_SECTION(label)	label##1:
+#define MAKE_LWSYNC_SECTION_ENTRY(label, sect)		\
+label##2:						\
+	.pushsection sect,"a";				\
+	.align 2;					\
+label##3:						\
+	.long label##1b-label##3b;			\
+	.popsection;
+
 #endif /* __ASM_POWERPC_FEATURE_FIXUPS_H */
diff --git a/include/asm-powerpc/pSeries_reconfig.h b/include/asm-powerpc/pSeries_reconfig.h
index ea6cfb8efb84..e482e5352e69 100644
--- a/include/asm-powerpc/pSeries_reconfig.h
+++ b/include/asm-powerpc/pSeries_reconfig.h
@@ -9,8 +9,10 @@
  * added or removed on pSeries systems.
  */
 
 #define PSERIES_RECONFIG_ADD		0x0001
 #define PSERIES_RECONFIG_REMOVE		0x0002
+#define PSERIES_DRCONF_MEM_ADD		0x0003
+#define PSERIES_DRCONF_MEM_REMOVE	0x0004
 
 #ifdef CONFIG_PPC_PSERIES
 extern int pSeries_reconfig_notifier_register(struct notifier_block *);
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index e93e72df4bca..061cd17ba83b 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -222,7 +222,7 @@ struct thread_struct {
 	.ksp_limit = INIT_SP_LIMIT, \
 	.regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \
 	.fs = KERNEL_DS, \
-	.fpr = {0}, \
+	.fpr = {{0}}, \
 	.fpscr = { .val = 0, }, \
 	.fpexc_mode = 0, \
 }
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index 9aea8e9f0bd1..54a47ea2c3aa 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -13,6 +13,8 @@
 #define MAX_PHYSADDR_BITS	44
 #define MAX_PHYSMEM_BITS	44
 
+#endif /* CONFIG_SPARSEMEM */
+
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern void create_section_mapping(unsigned long start, unsigned long end);
 extern int remove_section_mapping(unsigned long start, unsigned long end);
@@ -26,7 +28,5 @@ static inline int hot_add_scn_to_nid(unsigned long scn_addr)
 #endif /* CONFIG_NUMA */
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#endif /* CONFIG_SPARSEMEM */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/synch.h b/include/asm-powerpc/synch.h
index 42a1ef590690..45963e80f557 100644
--- a/include/asm-powerpc/synch.h
+++ b/include/asm-powerpc/synch.h
@@ -3,34 +3,42 @@
 #ifdef __KERNEL__
 
 #include <linux/stringify.h>
+#include <asm/feature-fixups.h>
 
-#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
-#define __SUBARCH_HAS_LWSYNC
-#endif
+#ifndef __ASSEMBLY__
+extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
+extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
+			     void *fixup_end);
+
+static inline void eieio(void)
+{
+	__asm__ __volatile__ ("eieio" : : : "memory");
+}
+
+static inline void isync(void)
+{
+	__asm__ __volatile__ ("isync" : : : "memory");
+}
+#endif /* __ASSEMBLY__ */
 
-#ifdef __SUBARCH_HAS_LWSYNC
+#if defined(__powerpc64__)
 # define LWSYNC	lwsync
+#elif defined(CONFIG_E500)
+# define LWSYNC					\
+	START_LWSYNC_SECTION(96);		\
+	sync;					\
+	MAKE_LWSYNC_SECTION_ENTRY(96, __lwsync_fixup);
 #else
 # define LWSYNC	sync
 #endif
 
 #ifdef CONFIG_SMP
 #define ISYNC_ON_SMP	"\n\tisync\n"
-#define LWSYNC_ON_SMP	__stringify(LWSYNC) "\n"
+#define LWSYNC_ON_SMP	stringify_in_c(LWSYNC) "\n"
 #else
 #define ISYNC_ON_SMP
 #define LWSYNC_ON_SMP
 #endif
 
-static inline void eieio(void)
-{
-	__asm__ __volatile__ ("eieio" : : : "memory");
-}
-
-static inline void isync(void)
-{
-	__asm__ __volatile__ ("isync" : : : "memory");
-}
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_SYNCH_H */
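
The point of the whole series: lwsync orders every combination of accesses except store-then-load, so it is sufficient for release-style publication and acquire-style consumption, while full sync remains the conservative fallback that boots everywhere and gets patched down to lwsync when CPU_FTR_LWSYNC is present. A minimal powerpc-only userspace illustration of where such a barrier sits (gcc inline asm, independent of the kernel macros; single-threaded here just to keep it runnable):

#include <stdio.h>

static int data;
static volatile int flag;

static void publish(int v)
{
	data = v;
	/* release barrier: order the data store before the flag store */
	__asm__ __volatile__("lwsync" : : : "memory");
	flag = 1;
}

static int consume(void)
{
	while (!flag)
		;
	/* acquire barrier: order the flag load before the data load */
	__asm__ __volatile__("lwsync" : : : "memory");
	return data;
}

int main(void)
{
	publish(42);
	printf("%d\n", consume());
	return 0;
}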