Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      18
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             24
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c       9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  23
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S     19
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c             22
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S      6
-rw-r--r--  arch/powerpc/kvm/booke.c                 12
8 files changed, 88 insertions(+), 45 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync				/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr = vcpu->arch.lr;
 	svcpu->pc = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);