author		Martin Schwidefsky <schwidefsky@de.ibm.com>	2016-03-08 06:16:35 -0500
committer	Christian Borntraeger <borntraeger@de.ibm.com>	2016-06-20 03:54:12 -0400
commit		aa17aa57cfb95b169f25fe98caae49e477590af3 (patch)
tree		b9dab6f5a97c94028e79ef90dec1e9ebb5fa10fd
parent		4be130a08420d6918d80c1067f8078f425eb98df (diff)
s390/mm: add kvm shadow fault function
This patch introduces the function kvm_s390_shadow_fault(), used to resolve a fault on a shadow gmap. The function does validity checking and builds up the shadow page table hierarchy in order to fault the requested page into the shadow page table structure. If an exception occurs while shadowing, guest 2 has to be notified about it, using either an exception or a program interrupt intercept. If concurrent unshadowing occurs, the function simply returns -EAGAIN and the caller has to retry.

Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
-rw-r--r--	arch/s390/kvm/gaccess.c	168
-rw-r--r--	arch/s390/kvm/gaccess.h	2
2 files changed, 170 insertions, 0 deletions
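Not part of the patch: a minimal sketch of how an intercept handler might drive kvm_s390_shadow_fault() according to the contract described above, retrying on -EAGAIN (concurrent unshadowing) and forwarding a positive return value to guest 2 as a program interrupt. The helpers shadow_gmap_of() and inject_pgm() are hypothetical placeholders, not existing KVM APIs; only the return-code handling follows the patch.

/*
 * Usage sketch (not part of this patch). shadow_gmap_of() and
 * inject_pgm() are hypothetical placeholders for the caller's way of
 * finding the shadow gmap and of notifying guest 2 with a program
 * interrupt; only the return-code handling mirrors kvm_s390_shadow_fault().
 */
static int handle_shadow_fault(struct kvm_vcpu *vcpu, unsigned long saddr,
			       int write)
{
	struct gmap *sg = shadow_gmap_of(vcpu);	/* hypothetical lookup */
	int rc;

	do {
		/* resolve the fault on the shadow gmap */
		rc = kvm_s390_shadow_fault(sg, saddr, write);
	} while (rc == -EAGAIN);	/* concurrent unshadowing: retry */

	if (rc > 0)
		/* pgm exception code: notify guest 2 */
		return inject_pgm(vcpu, rc, saddr);	/* hypothetical */

	return rc;	/* 0 on success, -EFAULT or -ENOMEM otherwise */
}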
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index 8e245e764c21..ba4985262bce 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -8,6 +8,7 @@
 #include <linux/vmalloc.h>
 #include <linux/err.h>
 #include <asm/pgtable.h>
+#include <asm/gmap.h>
 #include "kvm-s390.h"
 #include "gaccess.h"
 #include <asm/switch_to.h>
@@ -946,3 +947,170 @@ int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra)
 		return 0;
 	return trans_exc(vcpu, PGM_PROTECTION, gra, 0, GACC_STORE, PROT_TYPE_LA);
 }
+
+/**
+ * kvm_s390_shadow_tables - walk the guest page table and create shadow tables
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @pgt: pointer to the page table address result
+ * @dat_protection: pointer to the DAT protection result
+ */
+static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
+				  unsigned long *pgt, int *dat_protection)
+{
+	struct gmap *parent;
+	union asce asce;
+	union vaddress vaddr;
+	unsigned long ptr;
+	int rc;
+
+	parent = sg->parent;
+	vaddr.addr = saddr;
+	asce.val = sg->orig_asce;
+	ptr = asce.origin * 4096;
+	switch (asce.dt) {
+	case ASCE_TYPE_REGION1:
+		if (vaddr.rfx01 > asce.tl)
+			return PGM_REGION_FIRST_TRANS;
+		break;
+	case ASCE_TYPE_REGION2:
+		if (vaddr.rfx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.rsx01 > asce.tl)
+			return PGM_REGION_SECOND_TRANS;
+		break;
+	case ASCE_TYPE_REGION3:
+		if (vaddr.rfx || vaddr.rsx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.rtx01 > asce.tl)
+			return PGM_REGION_THIRD_TRANS;
+		break;
+	case ASCE_TYPE_SEGMENT:
+		if (vaddr.rfx || vaddr.rsx || vaddr.rtx)
+			return PGM_ASCE_TYPE;
+		if (vaddr.sx01 > asce.tl)
+			return PGM_SEGMENT_TRANSLATION;
+		break;
+	}
+
+	switch (asce.dt) {
+	case ASCE_TYPE_REGION1: {
+		union region1_table_entry rfte;
+
+		rc = gmap_read_table(parent, ptr + vaddr.rfx * 8, &rfte.val);
+		if (rc)
+			return rc;
+		if (rfte.i)
+			return PGM_REGION_FIRST_TRANS;
+		if (rfte.tt != TABLE_TYPE_REGION1)
+			return PGM_TRANSLATION_SPEC;
+		if (vaddr.rsx01 < rfte.tf || vaddr.rsx01 > rfte.tl)
+			return PGM_REGION_SECOND_TRANS;
+		rc = gmap_shadow_r2t(sg, saddr, rfte.val);
+		if (rc)
+			return rc;
+		ptr = rfte.rto * 4096;
+		/* fallthrough */
+	}
+	case ASCE_TYPE_REGION2: {
+		union region2_table_entry rste;
+
+		rc = gmap_read_table(parent, ptr + vaddr.rsx * 8, &rste.val);
+		if (rc)
+			return rc;
+		if (rste.i)
+			return PGM_REGION_SECOND_TRANS;
+		if (rste.tt != TABLE_TYPE_REGION2)
+			return PGM_TRANSLATION_SPEC;
+		if (vaddr.rtx01 < rste.tf || vaddr.rtx01 > rste.tl)
+			return PGM_REGION_THIRD_TRANS;
+		rc = gmap_shadow_r3t(sg, saddr, rste.val);
+		if (rc)
+			return rc;
+		ptr = rste.rto * 4096;
+		/* fallthrough */
+	}
+	case ASCE_TYPE_REGION3: {
+		union region3_table_entry rtte;
+
+		rc = gmap_read_table(parent, ptr + vaddr.rtx * 8, &rtte.val);
+		if (rc)
+			return rc;
+		if (rtte.i)
+			return PGM_REGION_THIRD_TRANS;
+		if (rtte.tt != TABLE_TYPE_REGION3)
+			return PGM_TRANSLATION_SPEC;
+		if (vaddr.sx01 < rtte.fc0.tf || vaddr.sx01 > rtte.fc0.tl)
+			return PGM_SEGMENT_TRANSLATION;
+		rc = gmap_shadow_sgt(sg, saddr, rtte.val);
+		if (rc)
+			return rc;
+		ptr = rtte.fc0.sto * 4096;
+		/* fallthrough */
+	}
+	case ASCE_TYPE_SEGMENT: {
+		union segment_table_entry ste;
+
+		rc = gmap_read_table(parent, ptr + vaddr.sx * 8, &ste.val);
+		if (rc)
+			return rc;
+		if (ste.i)
+			return PGM_SEGMENT_TRANSLATION;
+		if (ste.tt != TABLE_TYPE_SEGMENT)
+			return PGM_TRANSLATION_SPEC;
+		if (ste.cs && asce.p)
+			return PGM_TRANSLATION_SPEC;
+		*dat_protection = ste.fc0.p;
+		rc = gmap_shadow_pgt(sg, saddr, ste.val);
+		if (rc)
+			return rc;
+		ptr = ste.fc0.pto * 2048;
+	}
+	}
+	/* Return the parent address of the page table */
+	*pgt = ptr;
+	return 0;
+}
+
+/**
+ * kvm_s390_shadow_fault - handle fault on a shadow page table
+ * @sg: pointer to the shadow guest address space structure
+ * @saddr: faulting address in the shadow gmap
+ * @write: =1 map r/w, =0 map r/o
+ *
+ * Returns: - 0 if the shadow fault was successfully resolved
+ *	    - > 0 (pgm exception code) on exceptions while faulting
+ *	    - -EAGAIN if the caller can retry immediately
+ *	    - -EFAULT when accessing invalid guest addresses
+ *	    - -ENOMEM if out of memory
+ */
+int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
+{
+	union vaddress vaddr;
+	union page_table_entry pte;
+	unsigned long pgt;
+	int dat_protection;
+	int rc;
+
+	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection);
+	if (rc) {
+		rc = kvm_s390_shadow_tables(sg, saddr, &pgt, &dat_protection);
+		if (rc)
+			return rc;
+	}
+
+	vaddr.addr = saddr;
+	rc = gmap_read_table(sg->parent, pgt + vaddr.px * 8, &pte.val);
+	if (rc)
+		return rc;
+	if (pte.i)
+		return PGM_PAGE_TRANSLATION;
+	if (pte.z || pte.co)
+		return PGM_TRANSLATION_SPEC;
+	dat_protection |= pte.p;
+	if (write && dat_protection)
+		return PGM_PROTECTION;
+	rc = gmap_shadow_page(sg, saddr, pte.pfra * 4096, write);
+	if (rc)
+		return rc;
+	return 0;
+}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index df0a79dd8159..e5ec4734d42d 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -361,4 +361,6 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
 
+int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr, int write);
+
 #endif /* __KVM_S390_GACCESS_H */