Diffstat (limited to 'arch/powerpc/kernel')
 arch/powerpc/kernel/asm-offsets.c    | 79
 arch/powerpc/kernel/exceptions-64s.S | 60
 arch/powerpc/kernel/process.c        |  3
 arch/powerpc/kernel/setup-common.c   |  3
 arch/powerpc/kernel/smp.c            |  1
 5 files changed, 118 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dabfb7346f36..936267462cae 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -187,6 +187,7 @@ int main(void)
 	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
 	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
 	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
+	DEFINE(LPPACA_PMCINUSE, offsetof(struct lppaca, pmcregs_in_use));
 	DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx));
 	DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx));
 #endif /* CONFIG_PPC_STD_MMU_64 */
@@ -392,6 +393,29 @@ int main(void)
 	DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
 	DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
 	DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
+	DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
+	DEFINE(VCPU_FPSCR, offsetof(struct kvm_vcpu, arch.fpscr));
+#ifdef CONFIG_ALTIVEC
+	DEFINE(VCPU_VRS, offsetof(struct kvm_vcpu, arch.vr));
+	DEFINE(VCPU_VSCR, offsetof(struct kvm_vcpu, arch.vscr));
+#endif
+#ifdef CONFIG_VSX
+	DEFINE(VCPU_VSRS, offsetof(struct kvm_vcpu, arch.vsr));
+#endif
+	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
+	DEFINE(VCPU_CTR, offsetof(struct kvm_vcpu, arch.ctr));
+	DEFINE(VCPU_LR, offsetof(struct kvm_vcpu, arch.lr));
+	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
+	DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.pc));
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.shregs.msr));
+	DEFINE(VCPU_SRR0, offsetof(struct kvm_vcpu, arch.shregs.srr0));
+	DEFINE(VCPU_SRR1, offsetof(struct kvm_vcpu, arch.shregs.srr1));
+	DEFINE(VCPU_SPRG0, offsetof(struct kvm_vcpu, arch.shregs.sprg0));
+	DEFINE(VCPU_SPRG1, offsetof(struct kvm_vcpu, arch.shregs.sprg1));
+	DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
+	DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
+#endif
 	DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4));
 	DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5));
 	DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6));
@@ -403,17 +427,60 @@ int main(void)
 	DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
 
 	/* book3s */
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
+	DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
+	DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
+	DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
+	DEFINE(KVM_HOST_SDR1, offsetof(struct kvm, arch.host_sdr1));
+	DEFINE(KVM_TLBIE_LOCK, offsetof(struct kvm, arch.tlbie_lock));
+	DEFINE(KVM_ONLINE_CPUS, offsetof(struct kvm, online_vcpus.counter));
+	DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
+	DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
+	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
+#endif
 #ifdef CONFIG_PPC_BOOK3S
+	DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
 	DEFINE(VCPU_HOST_RETIP, offsetof(struct kvm_vcpu, arch.host_retip));
 	DEFINE(VCPU_HOST_MSR, offsetof(struct kvm_vcpu, arch.host_msr));
+	DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
+	DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
+	DEFINE(VCPU_DSCR, offsetof(struct kvm_vcpu, arch.dscr));
+	DEFINE(VCPU_AMR, offsetof(struct kvm_vcpu, arch.amr));
+	DEFINE(VCPU_UAMOR, offsetof(struct kvm_vcpu, arch.uamor));
+	DEFINE(VCPU_CTRL, offsetof(struct kvm_vcpu, arch.ctrl));
+	DEFINE(VCPU_DABR, offsetof(struct kvm_vcpu, arch.dabr));
 	DEFINE(VCPU_TRAMPOLINE_LOWMEM, offsetof(struct kvm_vcpu, arch.trampoline_lowmem));
 	DEFINE(VCPU_TRAMPOLINE_ENTER, offsetof(struct kvm_vcpu, arch.trampoline_enter));
 	DEFINE(VCPU_HIGHMEM_HANDLER, offsetof(struct kvm_vcpu, arch.highmem_handler));
 	DEFINE(VCPU_RMCALL, offsetof(struct kvm_vcpu, arch.rmcall));
 	DEFINE(VCPU_HFLAGS, offsetof(struct kvm_vcpu, arch.hflags));
+	DEFINE(VCPU_DEC, offsetof(struct kvm_vcpu, arch.dec));
+	DEFINE(VCPU_DEC_EXPIRES, offsetof(struct kvm_vcpu, arch.dec_expires));
+	DEFINE(VCPU_LPCR, offsetof(struct kvm_vcpu, arch.lpcr));
+	DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
+	DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
+	DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
+	DEFINE(VCPU_SLB_MAX, offsetof(struct kvm_vcpu, arch.slb_max));
+	DEFINE(VCPU_SLB_NR, offsetof(struct kvm_vcpu, arch.slb_nr));
+	DEFINE(VCPU_LAST_CPU, offsetof(struct kvm_vcpu, arch.last_cpu));
+	DEFINE(VCPU_FAULT_DSISR, offsetof(struct kvm_vcpu, arch.fault_dsisr));
+	DEFINE(VCPU_FAULT_DAR, offsetof(struct kvm_vcpu, arch.fault_dar));
+	DEFINE(VCPU_LAST_INST, offsetof(struct kvm_vcpu, arch.last_inst));
+	DEFINE(VCPU_TRAP, offsetof(struct kvm_vcpu, arch.trap));
+	DEFINE(VCPU_SVCPU, offsetof(struct kvmppc_vcpu_book3s, shadow_vcpu) -
+			   offsetof(struct kvmppc_vcpu_book3s, vcpu));
+	DEFINE(VCPU_SLB_E, offsetof(struct kvmppc_slb, orige));
+	DEFINE(VCPU_SLB_V, offsetof(struct kvmppc_slb, origv));
+	DEFINE(VCPU_SLB_SIZE, sizeof(struct kvmppc_slb));
 
 #ifdef CONFIG_PPC_BOOK3S_64
+#ifdef CONFIG_KVM_BOOK3S_PR
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, shadow_vcpu.f))
+#else
+# define SVCPU_FIELD(x, f)
+#endif
 # define HSTATE_FIELD(x, f)	DEFINE(x, offsetof(struct paca_struct, kvm_hstate.f))
 #else	/* 32-bit */
 # define SVCPU_FIELD(x, f)	DEFINE(x, offsetof(struct kvmppc_book3s_shadow_vcpu, f))
@@ -453,11 +520,23 @@ int main(void)
 
 	HSTATE_FIELD(HSTATE_HOST_R1, host_r1);
 	HSTATE_FIELD(HSTATE_HOST_R2, host_r2);
+	HSTATE_FIELD(HSTATE_HOST_MSR, host_msr);
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+	HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
+	HSTATE_FIELD(HSTATE_MMCR, host_mmcr);
+	HSTATE_FIELD(HSTATE_PMC, host_pmc);
+	HSTATE_FIELD(HSTATE_PURR, host_purr);
+	HSTATE_FIELD(HSTATE_SPURR, host_spurr);
+	HSTATE_FIELD(HSTATE_DSCR, host_dscr);
+	HSTATE_FIELD(HSTATE_DABR, dabr);
+	HSTATE_FIELD(HSTATE_DECEXP, dec_expires);
+#endif /* CONFIG_KVM_BOOK3S_64_HV */
+
 #else /* CONFIG_PPC_BOOK3S */
 	DEFINE(VCPU_CR, offsetof(struct kvm_vcpu, arch.cr));
 	DEFINE(VCPU_XER, offsetof(struct kvm_vcpu, arch.xer));
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 6da00550afea..163c041cec24 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -87,14 +87,14 @@ data_access_not_stab:
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 #endif
 	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
-				 KVMTEST, 0x300)
+				 KVMTEST_PR, 0x300)
 
 	. = 0x380
 	.globl data_access_slb_pSeries
 data_access_slb_pSeries:
 	HMT_MEDIUM
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_DAR
 #ifdef __DISABLED__
@@ -125,7 +125,7 @@ data_access_slb_pSeries:
 instruction_access_slb_pSeries:
 	HMT_MEDIUM
 	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x480)
+	EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
 	std	r3,PACA_EXSLB+EX_R3(r13)
 	mfspr	r3,SPRN_SRR0		/* SRR0 is faulting address */
 #ifdef __DISABLED__
@@ -153,32 +153,32 @@ instruction_access_slb_pSeries:
 hardware_interrupt_pSeries:
 hardware_interrupt_hv:
 	BEGIN_FTR_SECTION
-		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
-					    EXC_STD, SOFTEN_TEST)
-		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
-	FTR_SECTION_ELSE
 		_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
 					    EXC_HV, SOFTEN_TEST_HV)
 		KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
-	ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206)
+	FTR_SECTION_ELSE
+		_MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
+					    EXC_STD, SOFTEN_TEST_PR)
+		KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
+	ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE_206)
 
 	STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x600)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
 
 	STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x700)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
 
 	STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x800)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 
 	MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
 	MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
 
 	STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xa00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
 
 	STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xb00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
 
 	. = 0xc00
 	.globl	system_call_pSeries
@@ -219,7 +219,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
 	b	.
 
 	STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xd00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
 
 	/* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 	 * out of line to handle them
@@ -254,23 +254,23 @@ vsx_unavailable_pSeries_1:
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 #endif /* CONFIG_CBE_RAS */
 
 	STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 #endif /* CONFIG_CBE_RAS */
 
 	STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x1700)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
 #endif /* CONFIG_CBE_RAS */
 
 	. = 0x3000
@@ -297,7 +297,7 @@ data_access_check_stab:
 	mfspr	r9,SPRN_DSISR
 	srdi	r10,r10,60
 	rlwimi	r10,r9,16,0x20
-#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 	lbz	r9,HSTATE_IN_GUEST(r13)
 	rlwimi	r10,r9,8,0x300
 #endif
@@ -316,11 +316,11 @@ do_stab_bolted_pSeries:
 	EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
 #endif /* CONFIG_POWER4_ONLY */
 
-	KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
-	KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x400)
-	KVM_HANDLER(PACA_EXSLB, EXC_STD, 0x480)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x900)
+	KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300)
+	KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
+	KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
 	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 
 	.align	7
@@ -336,11 +336,11 @@ do_stab_bolted_pSeries:
 
 	/* moved from 0xf00 */
 	STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf00)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
 	STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf20)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable)
-	KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xf40)
+	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 
 /*
  * An interrupt came in while soft-disabled; clear EE in SRR1,
@@ -417,7 +417,11 @@ slb_miss_user_pseries:
 /* KVM's trampoline code needs to be close to the interrupt handlers */
 
 #ifdef CONFIG_KVM_BOOK3S_64_HANDLER
+#ifdef CONFIG_KVM_BOOK3S_PR
 #include "../kvm/book3s_rmhandlers.S"
+#else
+#include "../kvm/book3s_hv_rmhandlers.S"
+#endif
 #endif
 
 	.align	7
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 60ac2a9251db..ec2d0edeb134 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -96,6 +96,7 @@ void flush_fp_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_fp_to_thread);
 
 void enable_kernel_fp(void)
 {
@@ -145,6 +146,7 @@ void flush_altivec_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
 #endif /* CONFIG_ALTIVEC */
 
 #ifdef CONFIG_VSX
@@ -186,6 +188,7 @@ void flush_vsx_to_thread(struct task_struct *tsk)
 		preempt_enable();
 	}
 }
+EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
 #endif /* CONFIG_VSX */
 
 #ifdef CONFIG_SPE
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 79fca2651b65..22051ef04bd9 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -375,6 +375,9 @@ void __init check_for_initrd(void)
 
 int threads_per_core, threads_shift;
 cpumask_t threads_core_mask;
+EXPORT_SYMBOL_GPL(threads_per_core);
+EXPORT_SYMBOL_GPL(threads_shift);
+EXPORT_SYMBOL_GPL(threads_core_mask);
 
 static void __init cpu_init_thread_core_maps(int tpc)
 {
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ebc6700b98d..09a85a9045d6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,6 +243,7 @@ void smp_send_reschedule(int cpu)
 	if (likely(smp_ops))
 		smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE);
 }
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
 
 void arch_send_call_function_single_ipi(int cpu)
 {