aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Hildenbrand <dahi@linux.vnet.ibm.com>2015-07-08 07:25:31 -0400
committerChristian Borntraeger <borntraeger@de.ibm.com>2016-06-21 03:43:44 -0400
commit1b7029bec18718eca8cfc5c1c0917444f019be1e (patch)
tree3be941cf36af3b2c6b40a2b6ff691626c5fffd79
parent7fd7f39daa3da822122124730437c4f37e4d82de (diff)
KVM: s390: vsie: try to refault after a reported fault to g2
We can avoid one unneeded SIE entry after we reported a fault to g2. Theoretically, g2 resolves the fault and we can create the shadow mapping directly, instead of failing again when entering the SIE.

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--arch/s390/kvm/vsie.c24
1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 3ececbbd6bb0..7482488d21d0 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -28,7 +28,9 @@ struct vsie_page {
 	struct kvm_s390_sie_block *scb_o;	/* 0x0200 */
 	/* the shadow gmap in use by the vsie_page */
 	struct gmap *gmap;			/* 0x0208 */
-	__u8 reserved[0x0700 - 0x0210];		/* 0x0210 */
+	/* address of the last reported fault to guest2 */
+	unsigned long fault_addr;		/* 0x0210 */
+	__u8 reserved[0x0700 - 0x0218];		/* 0x0218 */
 	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
 	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
 } __packed;
@@ -676,10 +678,27 @@ static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		rc = inject_fault(vcpu, rc,
 				  current->thread.gmap_addr,
 				  current->thread.gmap_write_flag);
+		if (rc >= 0)
+			vsie_page->fault_addr = current->thread.gmap_addr;
 	}
 	return rc;
 }
 
+/*
+ * Retry the previous fault that required guest 2 intervention. This avoids
+ * one superfluous SIE re-entry and direct exit.
+ *
+ * Will ignore any errors. The next SIE fault will do proper fault handling.
+ */
+static void handle_last_fault(struct kvm_vcpu *vcpu,
+			      struct vsie_page *vsie_page)
+{
+	if (vsie_page->fault_addr)
+		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
+				      vsie_page->fault_addr);
+	vsie_page->fault_addr = 0;
+}
+
 static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
 {
 	vsie_page->scb_s.icptcode = 0;
@@ -737,6 +756,8 @@ static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 	int rc;
 
+	handle_last_fault(vcpu, vsie_page);
+
 	if (need_resched())
 		schedule();
 	if (test_cpu_flag(CIF_MCCK_PENDING))
@@ -928,6 +949,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 	vsie_page = page_to_virt(page);
 	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
 	release_gmap_shadow(vsie_page);
+	vsie_page->fault_addr = 0;
 	vsie_page->scb_s.ihcpu = 0xffffU;
 	return vsie_page;
 }