Diffstat (limited to 'arch')
-rw-r--r--  arch/ia64/kvm/Makefile              |  2
-rw-r--r--  arch/ia64/kvm/optvfault.S           | 11
-rw-r--r--  arch/powerpc/include/asm/kvm_ppc.h  |  2
-rw-r--r--  arch/powerpc/kvm/44x_tlb.c          |  8
-rw-r--r--  arch/powerpc/kvm/powerpc.c          |  1
-rw-r--r--  arch/s390/kvm/sigp.c                |  5
-rw-r--r--  arch/x86/kvm/mmu.c                  |  2
-rw-r--r--  arch/x86/kvm/paging_tmpl.h          |  1
-rw-r--r--  arch/x86/kvm/vmx.c                  |  4
9 files changed, 29 insertions, 7 deletions
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 3ab4d6d50704..92cef66ca268 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -58,7 +58,7 @@ endif
 kvm-objs := $(common-objs) kvm-ia64.o kvm_fw.o
 obj-$(CONFIG_KVM) += kvm.o
 
-EXTRA_CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
+CFLAGS_vcpu.o += -mfixed-range=f2-f5,f12-f127
 kvm-intel-objs = vmm.o vmm_ivt.o trampoline.o vcpu.o optvfault.o mmio.o \
 	vtlb.o process.o
 #Add link memcpy and memset to avoid possible structure assignment error
diff --git a/arch/ia64/kvm/optvfault.S b/arch/ia64/kvm/optvfault.S
index 634abad979b5..32254ce9a1bd 100644
--- a/arch/ia64/kvm/optvfault.S
+++ b/arch/ia64/kvm/optvfault.S
@@ -107,10 +107,10 @@ END(kvm_vps_resume_normal)
 GLOBAL_ENTRY(kvm_vps_resume_handler)
 	movl r30 = PAL_VPS_RESUME_HANDLER
 	;;
-	ld8 r27=[r25]
+	ld8 r26=[r25]
 	shr r17=r17,IA64_ISR_IR_BIT
 	;;
-	dep r27=r17,r27,63,1 // bit 63 of r27 indicate whether enable CFLE
+	dep r26=r17,r26,63,1 // bit 63 of r26 indicate whether enable CFLE
 	mov pr=r23,-2
 	br.sptk.many kvm_vps_entry
 END(kvm_vps_resume_handler)
@@ -894,12 +894,15 @@ ENTRY(kvm_resume_to_guest)
 	;;
 	ld8 r19=[r19]
 	mov b0=r29
-	cmp.ne p6,p7 = r0,r0
+	mov r27=cr.isr
 	;;
-	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p1=vpsr.ic
+	tbit.z p6,p7 = r19,IA64_PSR_IC_BIT // p7=vpsr.ic
+	shr r27=r27,IA64_ISR_IR_BIT
 	;;
 	(p6) ld8 r26=[r25]
 	(p7) mov b0=r28
+	;;
+	(p6) dep r26=r27,r26,63,1
 	mov pr=r31,-2
 	br.sptk.many b0 // call pal service
 	;;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 8931ba729d2b..bb62ad876de3 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -104,4 +104,6 @@ static inline void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
 	}
 }
 
+extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
+
 #endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c
index 2e227a412bc2..ad72c6f9811f 100644
--- a/arch/powerpc/kvm/44x_tlb.c
+++ b/arch/powerpc/kvm/44x_tlb.c
@@ -124,6 +124,14 @@ static void kvmppc_44x_shadow_release(struct kvm_vcpu *vcpu,
 	}
 }
 
+void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i <= tlb_44x_hwater; i++)
+		kvmppc_44x_shadow_release(vcpu, i);
+}
+
 void kvmppc_tlbe_set_modified(struct kvm_vcpu *vcpu, unsigned int i)
 {
 	vcpu->arch.shadow_tlb_mod[i] = 1;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 90a6fc422b23..fda9baada132 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -238,6 +238,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
 {
+	kvmppc_core_destroy_mmu(vcpu);
 }
 
 /* Note: clearing MSR[DE] just means that the debug interrupt will not be
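
Note: taken together, the three powerpc hunks above declare kvmppc_core_destroy_mmu() in kvm_ppc.h, implement it in 44x_tlb.c by releasing each shadow TLB entry for indices up to and including tlb_44x_hwater, and invoke it from kvm_arch_vcpu_uninit() in powerpc.c. Below is a minimal stand-alone sketch of that teardown shape only; the names, types, and hwater value are illustrative placeholders, not the kernel's.

#include <stdio.h>

/* Placeholder for the real tlb_44x_hwater high-water mark. */
#define TLB_44X_HWATER 61

static void shadow_release(int index)      /* models kvmppc_44x_shadow_release() */
{
	printf("release shadow TLB entry %d\n", index);
}

static void core_destroy_mmu(void)         /* models kvmppc_core_destroy_mmu() */
{
	for (int i = 0; i <= TLB_44X_HWATER; i++)
		shadow_release(i);
}

int main(void)
{
	core_destroy_mmu();                /* what kvm_arch_vcpu_uninit() now triggers */
	return 0;
}
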
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c
index 170392687ce0..2a01b9e02801 100644
--- a/arch/s390/kvm/sigp.c
+++ b/arch/s390/kvm/sigp.c
@@ -237,6 +237,11 @@ int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
 	u8 order_code;
 	int rc;
 
+	/* sigp in userspace can exit */
+	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
+		return kvm_s390_inject_program_int(vcpu,
+						   PGM_PRIVILEGED_OPERATION);
+
 	order_code = disp2;
 	if (base2)
 		order_code += vcpu->arch.guest_gprs[base2];
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f1983d9477cd..410ddbc1aa2e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1038,13 +1038,13 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	}
 
 	rmap_write_protect(vcpu->kvm, sp->gfn);
+	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_zap_page(vcpu->kvm, sp);
 		return 1;
 	}
 
 	kvm_mmu_flush_tlb(vcpu);
-	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	return 0;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 613ec9aa674a..84eee43bbe74 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -331,6 +331,7 @@ static int FNAME(shadow_walk_entry)(struct kvm_shadow_walk *_sw,
 		r = kvm_read_guest_atomic(vcpu->kvm, gw->pte_gpa[level - 2],
 					  &curr_pte, sizeof(curr_pte));
 		if (r || curr_pte != gw->ptes[level - 2]) {
+			kvm_mmu_put_page(shadow_page, sptep);
 			kvm_release_pfn_clean(sw->pfn);
 			sw->sptep = NULL;
 			return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d06b4dc0e2ea..a4018b01e1f9 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3149,7 +3149,9 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 
 	if (cpu_has_virtual_nmis()) {
 		if (vcpu->arch.nmi_pending && !vcpu->arch.nmi_injected) {
-			if (vmx_nmi_enabled(vcpu)) {
+			if (vcpu->arch.interrupt.pending) {
+				enable_nmi_window(vcpu);
+			} else if (vmx_nmi_enabled(vcpu)) {
 				vcpu->arch.nmi_pending = false;
 				vcpu->arch.nmi_injected = true;
 			} else {
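
Note: in the vmx.c hunk above, when an NMI is pending but an interrupt is already queued for injection, the code now requests an NMI window via enable_nmi_window() instead of injecting the NMI immediately; only when no interrupt is queued and vmx_nmi_enabled() permits it is the NMI actually injected. A small stand-alone model of that decision order follows; the names are illustrative, not kernel code, and branches the hunk does not show are left out.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative model only; the enum and function are stand-ins. */
enum nmi_action { OPEN_NMI_WINDOW, INJECT_NMI, OTHER };

static enum nmi_action decide(bool nmi_pending, bool nmi_injected,
			      bool interrupt_pending, bool nmi_allowed)
{
	if (nmi_pending && !nmi_injected) {
		if (interrupt_pending)
			return OPEN_NMI_WINDOW;  /* an interrupt goes first */
		if (nmi_allowed)
			return INJECT_NMI;       /* nmi_pending=false, nmi_injected=true */
	}
	return OTHER;                            /* remaining branches elided in the hunk */
}

int main(void)
{
	/* NMI pending while an interrupt is also pending: open an NMI window. */
	printf("%d\n", decide(true, false, true, true) == OPEN_NMI_WINDOW);
	return 0;
}
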