author	David Hildenbrand <dahi@linux.vnet.ibm.com>	2016-04-22 07:50:09 -0400
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2016-06-21 03:43:34 -0400
commit	06d68a6c85d95515533663ff002d06753fd772aa (patch)
tree	588925752b42047736081a90f6f3cb9ca9df36d3 /arch/s390/kvm
parent	a3508fbe9dc6dd3bece0c7bf889cc085a011738c (diff)
KVM: s390: vsie: optimize gmap prefix mapping
In order to not always map the prefix, we have to take care of the cases
that implicitly unmap the prefix:

- Changes to the prefix address
- Changes to MSO, because the HVA of the prefix is changed
- Changes of the gmap shadow (e.g. unshadowed, asce or edat changes)

By properly handling these cases, we can stop remapping the prefix when
there is no reason to do so. This also allows us to avoid taking any gmap
shadow locks when rerunning the vsie while we still hold a valid gmap
shadow.

Please note that, to detect a changed gmap shadow, we have to keep the
reference to the gmap shadow. The address of a gmap shadow does not by
itself reliably indicate whether the gmap shadow has changed (the memory
chunk could get reused).

Acked-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
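Editor's note: the optimization hinges on the PROG_REQUEST bit in scb_s.prog20 doing double duty, as long as it is set, the prefix is considered unmapped, and map_prefix() can return early once it is clear. The stand-alone C sketch below only models that idea in user space: struct vsie_page_model and the plain integer flag are simplified stand-ins for the real structures (the kernel operates on scb_s.prog20 with atomics), and only the helper names mirror the patch that follows.

#include <stdio.h>

#define PROG_REQUEST 0x0001     /* stand-in for the real PROG_REQUEST bit */

/* stand-in for struct vsie_page: only the fields the sketch needs */
struct vsie_page_model {
	unsigned int prog20;    /* the bit doubles as "prefix not mapped" */
};

/* setting PROG_REQUEST marks the prefix as unmapped */
static void prefix_unmapped(struct vsie_page_model *p) { p->prog20 |= PROG_REQUEST; }
/* clearing it marks the prefix as mapped */
static void prefix_mapped(struct vsie_page_model *p) { p->prog20 &= ~PROG_REQUEST; }
/* the prefix counts as mapped iff the bit is clear */
static int prefix_is_mapped(struct vsie_page_model *p) { return !(p->prog20 & PROG_REQUEST); }

/* map_prefix() can now return early when nothing invalidated the mapping */
static int map_prefix(struct vsie_page_model *p)
{
	if (prefix_is_mapped(p))
		return 0;               /* fast path: reuse the existing mapping */
	prefix_mapped(p);               /* mark first, so concurrent unmappers are caught */
	printf("remapping prefix\n");
	return 0;
}

int main(void)
{
	struct vsie_page_model p = { .prog20 = PROG_REQUEST };

	map_prefix(&p);                 /* first run: prefix gets mapped */
	map_prefix(&p);                 /* rerun: early return, no remapping */
	prefix_unmapped(&p);            /* e.g. prefix/MSO changed or shadow released */
	map_prefix(&p);                 /* has to remap again */
	return 0;
}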
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--	arch/s390/kvm/vsie.c	31
1 file changed, 29 insertions(+), 2 deletions(-)
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 747d4f900155..2839efcfc5ff 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -62,6 +62,11 @@ static void prefix_mapped(struct vsie_page *vsie_page)
 	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
 }
 
+/* test if the prefix is mapped into the gmap shadow */
+static int prefix_is_mapped(struct vsie_page *vsie_page)
+{
+	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
+}
 
 /* copy the updated intervention request bits into the shadow scb */
 static void update_intervention_requests(struct vsie_page *vsie_page)
@@ -152,6 +157,7 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 {
 	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
 	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
+	unsigned long new_mso;
 	int rc;
 
 	/* make sure we don't have any leftovers when reusing the scb */
@@ -192,9 +198,13 @@ static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
 	scb_s->icpua = scb_o->icpua;
 
+	new_mso = scb_o->mso & 0xfffffffffff00000UL;
+	/* if the hva of the prefix changes, we have to remap the prefix */
+	if (scb_s->mso != new_mso || scb_s->prefix != scb_o->prefix)
+		prefix_unmapped(vsie_page);
 	/* SIE will do mso/msl validity and exception checks for us */
 	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
-	scb_s->mso = scb_o->mso & 0xfffffffffff00000UL;
+	scb_s->mso = new_mso;
 	scb_s->prefix = scb_o->prefix;
 
 	/* We have to definetly flush the tlb if this scb never ran */
@@ -262,6 +272,9 @@ static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
 	int rc;
 
+	if (prefix_is_mapped(vsie_page))
+		return 0;
+
 	/* mark it as mapped so we can catch any concurrent unmappers */
 	prefix_mapped(vsie_page);
 
@@ -532,6 +545,7 @@ static void release_gmap_shadow(struct vsie_page *vsie_page)
 	if (vsie_page->gmap)
 		gmap_put(vsie_page->gmap);
 	WRITE_ONCE(vsie_page->gmap, NULL);
+	prefix_unmapped(vsie_page);
 }
 
 static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
@@ -547,6 +561,16 @@ static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
 	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
 	edat += edat && test_kvm_facility(vcpu->kvm, 78);
 
+	/*
+	 * ASCE or EDAT could have changed since last icpt, or the gmap
+	 * we're holding has been unshadowed. If the gmap is still valid,
+	 * we can safely reuse it.
+	 */
+	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
+		return 0;
+
+	/* release the old shadow - if any, and mark the prefix as unmapped */
+	release_gmap_shadow(vsie_page);
 	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
 	if (IS_ERR(gmap))
 		return PTR_ERR(gmap);
@@ -578,7 +602,6 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
 		rc = do_vsie_run(vcpu, vsie_page);
 		gmap_enable(vcpu->arch.gmap);
 	}
-	release_gmap_shadow(vsie_page);
 
 	if (rc == -EAGAIN)
 		rc = 0;
@@ -667,6 +690,7 @@ static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
 
 	vsie_page = page_to_virt(page);
 	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
+	release_gmap_shadow(vsie_page);
 	vsie_page->scb_s.ihcpu = 0xffffU;
 	return vsie_page;
 }
@@ -739,6 +763,7 @@ void kvm_s390_vsie_init(struct kvm *kvm)
 /* Destroy the vsie data structures. To be called when a vm is destroyed. */
 void kvm_s390_vsie_destroy(struct kvm *kvm)
 {
+	struct vsie_page *vsie_page;
 	struct page *page;
 	int i;
 
@@ -746,6 +771,8 @@ void kvm_s390_vsie_destroy(struct kvm *kvm)
 	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
 		page = kvm->arch.vsie.pages[i];
 		kvm->arch.vsie.pages[i] = NULL;
+		vsie_page = page_to_virt(page);
+		release_gmap_shadow(vsie_page);
 		/* free the radix tree entry */
 		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
 		__free_page(page);