-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h      |  4
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_asm.h  |  2
-rw-r--r--  arch/powerpc/include/asm/switch_to.h       |  2
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c          |  1
-rw-r--r--  arch/powerpc/kernel/process.c              | 32
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c        | 18
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c               | 24
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c        |  9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S    | 23
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S       | 19
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c               | 22
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S       |  6
-rw-r--r--  arch/powerpc/kvm/booke.c                   | 12
13 files changed, 112 insertions(+), 62 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b76674d..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348a4db9..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee12610af02..aace90547614 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
 extern void enable_kernel_spe(void);
 extern void giveup_spe(struct task_struct *);
 extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
 
 #ifndef CONFIG_SMP
 extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc033ec8..d3de01066f7d 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
 	HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
 	HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
 	HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+	HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
 	HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
 	HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
 	HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8ab7eb0..4a96556fd2d4 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
 #endif
 }
 
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
 {
 	/*
 	 * We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
 	 */
 	mtmsr(mfmsr() & ~MSR_DE);
 
-	mtspr(SPRN_IAC1, thread->debug.iac1);
-	mtspr(SPRN_IAC2, thread->debug.iac2);
+	mtspr(SPRN_IAC1, debug->iac1);
+	mtspr(SPRN_IAC2, debug->iac2);
 #if CONFIG_PPC_ADV_DEBUG_IACS > 2
-	mtspr(SPRN_IAC3, thread->debug.iac3);
-	mtspr(SPRN_IAC4, thread->debug.iac4);
+	mtspr(SPRN_IAC3, debug->iac3);
+	mtspr(SPRN_IAC4, debug->iac4);
 #endif
-	mtspr(SPRN_DAC1, thread->debug.dac1);
-	mtspr(SPRN_DAC2, thread->debug.dac2);
+	mtspr(SPRN_DAC1, debug->dac1);
+	mtspr(SPRN_DAC2, debug->dac2);
 #if CONFIG_PPC_ADV_DEBUG_DVCS > 0
-	mtspr(SPRN_DVC1, thread->debug.dvc1);
-	mtspr(SPRN_DVC2, thread->debug.dvc2);
+	mtspr(SPRN_DVC1, debug->dvc1);
+	mtspr(SPRN_DVC2, debug->dvc2);
 #endif
-	mtspr(SPRN_DBCR0, thread->debug.dbcr0);
-	mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+	mtspr(SPRN_DBCR0, debug->dbcr0);
+	mtspr(SPRN_DBCR1, debug->dbcr1);
 #ifdef CONFIG_BOOKE
-	mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+	mtspr(SPRN_DBCR2, debug->dbcr2);
 #endif
 }
 /*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
  * debug registers, set the debug registers from the values
  * stored in the new thread.
  */
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
 {
 	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
-		|| (new_thread->debug.dbcr0 & DBCR0_IDM))
-			prime_debug_regs(new_thread);
+		|| (new_debug->dbcr0 & DBCR0_IDM))
+			prime_debug_regs(new_debug);
 }
 EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
 #else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_PPC_ADV_DEBUG_REGS
-	switch_booke_debug_regs(&new->thread);
+	switch_booke_debug_regs(&new->thread.debug);
 #else
 /*
  * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587a8b7d..c5d148434c08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 		slb_v = vcpu->kvm->arch.vrma_slb_v;
 	}
 
+	preempt_disable();
 	/* Find the HPTE in the hash table */
 	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
 					 HPTE_V_VALID | HPTE_V_ABSENT);
-	if (index < 0)
+	if (index < 0) {
+		preempt_enable();
 		return -ENOENT;
+	}
 	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
 	v = hptep[0] & ~HPTE_V_HVLOCK;
 	gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Unlock the HPTE */
 	asm volatile("lwsync" : : : "memory");
 	hptep[0] = v;
+	preempt_enable();
 
 	gpte->eaddr = eaddr;
 	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			return -EFAULT;
 	} else {
 		page = pages[0];
+		pfn = page_to_pfn(page);
 		if (PageHuge(page)) {
 			page = compound_head(page);
 			pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			}
 			rcu_read_unlock_sched();
 		}
-		pfn = page_to_pfn(page);
 	}
 
 	ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
 	}
 
-	/* Set the HPTE to point to pfn */
-	r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+	/*
+	 * Set the HPTE to point to pfn.
+	 * Since the pfn is at PAGE_SIZE granularity, make sure we
+	 * don't mask out lower-order bits if psize < PAGE_SIZE.
+	 */
+	if (psize < PAGE_SIZE)
+		psize = PAGE_SIZE;
+	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
 	if (hpte_is_writable(r) && !write_ok)
 		r = hpte_make_readonly(r);
 	ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f1c3bc..b51d5db78068 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
 static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
 	    vc->preempt_tb != TB_NIL) {
 		vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
 		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
 		vcpu->arch.busy_preempt = TB_NIL;
 	}
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
+	unsigned long flags;
 
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
 	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
 		vc->preempt_tb = mftb();
 	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
 		vcpu->arch.busy_preempt = mftb();
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
 }
 
 static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
 	 */
 	if (vc->vcore_state != VCORE_INACTIVE &&
 	    vc->runner->arch.run_task != current) {
-		spin_lock(&vc->runner->arch.tbacct_lock);
+		spin_lock_irq(&vc->runner->arch.tbacct_lock);
 		p = vc->stolen_tb;
 		if (vc->preempt_tb != TB_NIL)
 			p += now - vc->preempt_tb;
-		spin_unlock(&vc->runner->arch.tbacct_lock);
+		spin_unlock_irq(&vc->runner->arch.tbacct_lock);
 	} else {
 		p = vc->stolen_tb;
 	}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
 	core_stolen = vcore_stolen_time(vc, now);
 	stolen = core_stolen - vcpu->arch.stolen_logged;
 	vcpu->arch.stolen_logged = core_stolen;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	stolen += vcpu->arch.busy_stolen;
 	vcpu->arch.busy_stolen = 0;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	if (!dt || !vpa)
 		return;
 	memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
 		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
 			return RESUME_HOST;
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		rc = kvmppc_rtas_hcall(vcpu);
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 
 		if (rc == -ENOENT)
 			return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
 
 	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
 		return;
-	spin_lock(&vcpu->arch.tbacct_lock);
+	spin_lock_irq(&vcpu->arch.tbacct_lock);
 	now = mftb();
 	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
 		vcpu->arch.stolen_logged;
 	vcpu->arch.busy_preempt = now;
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
-	spin_unlock(&vcpu->arch.tbacct_lock);
+	spin_unlock_irq(&vcpu->arch.tbacct_lock);
 	--vc->n_runnable;
 	list_del(&vcpu->arch.run_list);
 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c515440ad1a..8689e2e30857 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		is_io = pa & (HPTE_R_I | HPTE_R_W);
 		pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
 		pa &= PAGE_MASK;
+		pa |= gpa & ~PAGE_MASK;
 	} else {
 		/* Translate to host virtual address */
 		hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 				ptel = hpte_make_readonly(ptel);
 			is_io = hpte_cache_bits(pte_val(pte));
 			pa = pte_pfn(pte) << PAGE_SHIFT;
+			pa |= hva & (pte_size - 1);
+			pa |= gpa & ~PAGE_MASK;
 		}
 	}
 
 	if (pte_size < psize)
 		return H_PARAMETER;
-	if (pa && pte_size > psize)
-		pa |= gpa & (pte_size - 1);
 
 	ptel &= ~(HPTE_R_PP0 - psize);
 	ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
 	20,	/* 1M, unsupported */
 };
 
+/* When called from virtmode, this func should be protected by
+ * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK
+ * can trigger deadlock issue.
+ */
 long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			      unsigned long valid)
 {
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75b1925..be4fa04a37c9 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 
 13:	b	machine_check_fwnmi
 
-
 /*
  * We come in here when wakened from nap mode on a secondary hw thread.
  * Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
 	/* Clear our vcpu pointer so we don't come back in early */
 	li	r0, 0
 	std	r0, HSTATE_KVM_VCPU(r13)
+	/*
+	 * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+	 * the nap_count, because once the increment to nap_count is
+	 * visible we could be given another vcpu.
+	 */
 	lwsync
 	/* Clear any pending IPI - we're an offline thread */
 	ld	r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
 	/* increment the nap count and then go to nap mode */
 	ld	r4, HSTATE_KVM_VCORE(r13)
 	addi	r4, r4, VCORE_NAP_COUNT
-	lwsync	/* make previous updates visible */
 51:	lwarx	r3, 0, r4
 	addi	r3, r3, 1
 	stwcx.	r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
 	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
 	 * guest R13 saved in SPRN_SCRATCH0
 	 */
-	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
-	std	r9, HSTATE_HOST_R2(r13)
+	std	r9, HSTATE_SCRATCH2(r13)
 
 	lbz	r9, HSTATE_IN_GUEST(r13)
 	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
 	beq	kvmppc_bad_host_intr
 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 	cmpwi	r9, KVM_GUEST_MODE_GUEST
-	ld	r9, HSTATE_HOST_R2(r13)
+	ld	r9, HSTATE_SCRATCH2(r13)
 	beq	kvmppc_interrupt_pr
 #endif
 	/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
 	std	r6, VCPU_GPR(R6)(r9)
 	std	r7, VCPU_GPR(R7)(r9)
 	std	r8, VCPU_GPR(R8)(r9)
-	ld	r0, HSTATE_HOST_R2(r13)
+	ld	r0, HSTATE_SCRATCH2(r13)
 	std	r0, VCPU_GPR(R9)(r9)
 	std	r10, VCPU_GPR(R10)(r9)
 	std	r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	 */
 	/* Increment the threads-exiting-guest count in the 0xff00
 	   bits of vcore->entry_exit_count */
-	lwsync
 	ld	r5,HSTATE_KVM_VCORE(r13)
 	addi	r6,r5,VCORE_ENTRY_EXIT
 41:	lwarx	r3,0,r6
 	addi	r0,r3,0x100
 	stwcx.	r0,0,r6
 	bne	41b
-	lwsync
+	isync		/* order stwcx. vs. reading napping_threads */
 
 	/*
 	 * At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
 	sld	r0,r0,r4
 	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
 	beq	43f
+	/* Order entry/exit update vs. IPIs */
+	sync
 	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
 	subf	r6,r4,r13
 42:	andi.	r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 	bge	kvm_cede_exit
 	stwcx.	r4,0,r6
 	bne	31b
+	/* order napping_threads update vs testing entry_exit_count */
+	isync
 	li	r0,1
 	stb	r0,HSTATE_NAPPING(r13)
-	/* order napping_threads update vs testing entry_exit_count */
-	lwsync
 	mr	r4,r3
 	lwz	r7,VCORE_ENTRY_EXIT(r5)
 	cmpwi	r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041c14ea..f779450cb07c 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
 	 * R12      = exit handler id
 	 * R13      = PACA
 	 * SVCPU.*  = guest *
+	 * MSR.EE   = 1
 	 *
 	 */
 
+	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
+	/*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
+	 * the exit handler id to the vcpu and restore it from there later.
+	 */
+	stw	r12, VCPU_TRAP(r3)
+
 	/* Transfer reg values from shadow vcpu back to vcpu struct */
 	/* On 64-bit, interrupts are still off at this point */
-	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */
+
 	GET_SHADOW_VCPU(r4)
 	bl	FUNC(kvmppc_copy_from_svcpu)
 	nop
 
 #ifdef CONFIG_PPC_BOOK3S_64
-	/* Re-enable interrupts */
-	ld	r3, HSTATE_HOST_MSR(r13)
-	ori	r3, r3, MSR_EE
-	MTMSR_EERI(r3)
-
 	/*
 	 * Reload kernel SPRG3 value.
 	 * No need to save guest value as usermode can't modify SPRG3.
 	 */
 	ld	r3, PACA_SPRG3(r13)
 	mtspr	SPRN_SPRG3, r3
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 	/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
 	PPC_STL	r31, VCPU_GPR(R31)(r7)
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r12
+	lwz	r5, VCPU_TRAP(r7)
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3dd171..5b9e9063cfaf 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
 	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
 	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+	svcpu->in_use = 0;
 	svcpu_put(svcpu);
 #endif
 	vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
 {
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+	if (svcpu->in_use) {
+		kvmppc_copy_from_svcpu(vcpu, svcpu);
+	}
 	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
 	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
 	svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
 	svcpu->ctr = vcpu->arch.ctr;
 	svcpu->lr  = vcpu->arch.lr;
 	svcpu->pc  = vcpu->arch.pc;
+	svcpu->in_use = true;
 }
 
 /* Copy data touched by real-mode code from shadow vcpu back to vcpu */
 void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 			    struct kvmppc_book3s_shadow_vcpu *svcpu)
 {
+	/*
+	 * vcpu_put would just call us again because in_use hasn't
+	 * been updated yet.
+	 */
+	preempt_disable();
+
+	/*
+	 * Maybe we were already preempted and synced the svcpu from
+	 * our preempt notifiers. Don't bother touching this svcpu then.
+	 */
+	if (!svcpu->in_use)
+		goto out;
+
 	vcpu->arch.gpr[0] = svcpu->gpr[0];
 	vcpu->arch.gpr[1] = svcpu->gpr[1];
 	vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
 	vcpu->arch.fault_dar   = svcpu->fault_dar;
 	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
 	vcpu->arch.last_inst   = svcpu->last_inst;
+	svcpu->in_use = false;
+
+out:
+	preempt_enable();
 }
 
 static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9edab8..c3c5231adade 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
 
 	li	r6, MSR_IR | MSR_DR
 	andc	r6, r5, r6	/* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
 	/*
 	 * Set EE in HOST_MSR so that it's enabled when we get into our
-	 * C exit handler function. On 64-bit we delay enabling
-	 * interrupts until we have finished transferring stuff
-	 * to or from the PACA.
+	 * C exit handler function.
 	 */
 	ori	r5, r5, MSR_EE
-#endif
 	mtsrr0	r7
 	mtsrr1	r6
 	RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a210b9a..0591e05db74b 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
 int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int ret, s;
-	struct thread_struct thread;
+	struct debug_reg debug;
 #ifdef CONFIG_PPC_FPU
 	struct thread_fp_state fp;
 	int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 #endif
 
 	/* Switch to guest debug context */
-	thread.debug = vcpu->arch.shadow_dbg_reg;
-	switch_booke_debug_regs(&thread);
-	thread.debug = current->thread.debug;
+	debug = vcpu->arch.shadow_dbg_reg;
+	switch_booke_debug_regs(&debug);
+	debug = current->thread.debug;
 	current->thread.debug = vcpu->arch.shadow_dbg_reg;
 
 	kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	   We also get here with interrupts enabled. */
 
 	/* Switch back to user space debug context */
-	switch_booke_debug_regs(&thread);
-	current->thread.debug = thread.debug;
+	switch_booke_debug_regs(&debug);
+	current->thread.debug = debug;
 
 #ifdef CONFIG_PPC_FPU
 	kvmppc_save_guest_fp(vcpu);
