author    Avi Kivity <avi@redhat.com>    2010-11-22 10:53:22 -0500
committer Avi Kivity <avi@redhat.com>    2011-01-12 04:29:54 -0500
commit    bcc55cba9f1fcda68412c8c3d8579c56d90b16f2 (patch)
tree      1f4c45ad993e66effaaf2aa0bc7d2ebade6c41d0
parent    da9cb575b1127f84984b8ad6d973dcc05ac036dd (diff)
KVM: x86 emulator: make emulator memory callbacks return full exception
This way, they can return #GP, not just #PF.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h  15
-rw-r--r--  arch/x86/kvm/emulate.c              89
-rw-r--r--  arch/x86/kvm/x86.c                  76
3 files changed, 84 insertions(+), 96 deletions(-)
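
The heart of the change is the callback signature: instead of a bare u32 *error out-parameter (implicitly always a page-fault error code), every memory callback now takes a struct x86_exception * and fills in the vector itself. What follows is a minimal self-contained sketch of the before/after contract, not kernel code: the typedefs are stand-ins, and the struct shows only the three fields that make_page_fault() in this patch actually sets (the in-tree definition in arch/x86/include/asm/kvm_emulate.h may carry more).

/* Sketch of the old vs. new callback contract (illustrative only). */
#include <stdbool.h>
#include <stdint.h>

typedef uint8_t  u8;    /* hypothetical stand-ins for kernel types */
typedef uint16_t u16;
typedef uint32_t u32;
struct kvm_vcpu;

/* Exception record the callbacks now fill in; fields shown are the
 * ones make_page_fault() sets in this patch. */
struct x86_exception {
        u8   vector;            /* e.g. PF_VECTOR (14) or GP_VECTOR (13) */
        bool error_code_valid;  /* does this vector push an error code? */
        u16  error_code;
};

/* Before: only a raw error code, i.e. implicitly always a #PF. */
typedef int (*read_std_old_t)(unsigned long addr, void *val,
                              unsigned int bytes, struct kvm_vcpu *vcpu,
                              u32 *error);

/* After: a full exception descriptor, so #GP becomes representable. */
typedef int (*read_std_new_t)(unsigned long addr, void *val,
                              unsigned int bytes, struct kvm_vcpu *vcpu,
                              struct x86_exception *fault);
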
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b7c11270ae8f..87d017e276f4 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -70,7 +70,8 @@ struct x86_emulate_ops {
         * @bytes:  [IN ] Number of bytes to read from memory.
         */
        int (*read_std)(unsigned long addr, void *val,
-                       unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+                       unsigned int bytes, struct kvm_vcpu *vcpu,
+                       struct x86_exception *fault);
 
        /*
         * write_std: Write bytes of standard (non-emulated/special) memory.
@@ -80,7 +81,8 @@ struct x86_emulate_ops {
         * @bytes:  [IN ] Number of bytes to write to memory.
         */
        int (*write_std)(unsigned long addr, void *val,
-                        unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+                        unsigned int bytes, struct kvm_vcpu *vcpu,
+                        struct x86_exception *fault);
        /*
         * fetch: Read bytes of standard (non-emulated/special) memory.
         *        Used for instruction fetch.
@@ -89,7 +91,8 @@ struct x86_emulate_ops {
         * @bytes:  [IN ] Number of bytes to read from memory.
         */
        int (*fetch)(unsigned long addr, void *val,
-                    unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+                    unsigned int bytes, struct kvm_vcpu *vcpu,
+                    struct x86_exception *fault);
 
        /*
         * read_emulated: Read bytes from emulated/special memory area.
@@ -100,7 +103,7 @@ struct x86_emulate_ops {
        int (*read_emulated)(unsigned long addr,
                             void *val,
                             unsigned int bytes,
-                            unsigned int *error,
+                            struct x86_exception *fault,
                             struct kvm_vcpu *vcpu);
 
        /*
@@ -113,7 +116,7 @@ struct x86_emulate_ops {
        int (*write_emulated)(unsigned long addr,
                              const void *val,
                              unsigned int bytes,
-                             unsigned int *error,
+                             struct x86_exception *fault,
                              struct kvm_vcpu *vcpu);
 
        /*
@@ -128,7 +131,7 @@ struct x86_emulate_ops {
                              const void *old,
                              const void *new,
                              unsigned int bytes,
-                             unsigned int *error,
+                             struct x86_exception *fault,
                              struct kvm_vcpu *vcpu);
 
        int (*pio_in_emulated)(int size, unsigned short port, void *val,
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 18596e6649aa..16ed6c178bb2 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -512,7 +512,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
                cur_size = fc->end - fc->start;
                size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
                rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
-                               size, ctxt->vcpu, NULL);
+                               size, ctxt->vcpu, &ctxt->exception);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                fc->end += size;
@@ -565,12 +565,12 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
                op_bytes = 3;
        *address = 0;
        rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
-                          ctxt->vcpu, NULL);
+                          ctxt->vcpu, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;
        addr.ea += 2;
        rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
-                          ctxt->vcpu, NULL);
+                          ctxt->vcpu, &ctxt->exception);
        return rc;
 }
 
@@ -816,7 +816,6 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 {
        int rc;
        struct read_cache *mc = &ctxt->decode.mem_read;
-       u32 err;
 
        while (size) {
                int n = min(size, 8u);
@@ -824,10 +823,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
                if (mc->pos < mc->end)
                        goto read_cached;
 
-               rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
-                                       ctxt->vcpu);
-               if (rc == X86EMUL_PROPAGATE_FAULT)
-                       emulate_pf(ctxt);
+               rc = ops->read_emulated(addr, mc->data + mc->end, n,
+                                       &ctxt->exception, ctxt->vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                mc->end += n;
@@ -902,7 +899,6 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        struct desc_ptr dt;
        u16 index = selector >> 3;
        int ret;
-       u32 err;
        ulong addr;
 
        get_descriptor_table_ptr(ctxt, ops, selector, &dt);
@@ -912,9 +908,8 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
                return X86EMUL_PROPAGATE_FAULT;
        }
        addr = dt.address + index * 8;
-       ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT)
-               emulate_pf(ctxt);
+       ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
+                           &ctxt->exception);
 
        return ret;
 }
@@ -926,7 +921,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 {
        struct desc_ptr dt;
        u16 index = selector >> 3;
-       u32 err;
        ulong addr;
        int ret;
 
@@ -938,9 +932,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        }
 
        addr = dt.address + index * 8;
-       ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT)
-               emulate_pf(ctxt);
+       ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
+                            &ctxt->exception);
 
        return ret;
 }
@@ -1087,7 +1080,6 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 {
        int rc;
        struct decode_cache *c = &ctxt->decode;
-       u32 err;
 
        switch (c->dst.type) {
        case OP_REG:
@@ -1100,17 +1092,15 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
                                        &c->dst.orig_val,
                                        &c->dst.val,
                                        c->dst.bytes,
-                                       &err,
+                                       &ctxt->exception,
                                        ctxt->vcpu);
                else
                        rc = ops->write_emulated(
                                        linear(ctxt, c->dst.addr.mem),
                                        &c->dst.val,
                                        c->dst.bytes,
-                                       &err,
+                                       &ctxt->exception,
                                        ctxt->vcpu);
-               if (rc == X86EMUL_PROPAGATE_FAULT)
-                       emulate_pf(ctxt);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
                break;
@@ -1283,7 +1273,6 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
        gva_t cs_addr;
        gva_t eip_addr;
        u16 cs, eip;
-       u32 err;
 
        /* TODO: Add limit checks */
        c->src.val = ctxt->eflags;
@@ -1313,11 +1302,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
        eip_addr = dt.address + (irq << 2);
        cs_addr = dt.address + (irq << 2) + 2;
 
-       rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &err);
+       rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
-       rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &err);
+       rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -1930,33 +1919,27 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 {
        struct tss_segment_16 tss_seg;
        int ret;
-       u32 err, new_tss_base = get_desc_base(new_desc);
+       u32 new_tss_base = get_desc_base(new_desc);
 
        ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                           &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                           &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        save_state_to_tss16(ctxt, ops, &tss_seg);
 
        ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                            &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                            &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                           &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                           &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
@@ -1964,12 +1947,10 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                ret = ops->write_std(new_tss_base,
                                     &tss_seg.prev_task_link,
                                     sizeof tss_seg.prev_task_link,
-                                    ctxt->vcpu, &err);
-               if (ret == X86EMUL_PROPAGATE_FAULT) {
+                                    ctxt->vcpu, &ctxt->exception);
+               if (ret == X86EMUL_PROPAGATE_FAULT)
                        /* FIXME: need to provide precise fault address */
-                       emulate_pf(ctxt);
                        return ret;
-               }
        }
 
        return load_state_from_tss16(ctxt, ops, &tss_seg);
@@ -2072,33 +2053,27 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 {
        struct tss_segment_32 tss_seg;
        int ret;
-       u32 err, new_tss_base = get_desc_base(new_desc);
+       u32 new_tss_base = get_desc_base(new_desc);
 
        ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                           &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                           &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        save_state_to_tss32(ctxt, ops, &tss_seg);
 
        ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                            &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                            &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
-                           &err);
-       if (ret == X86EMUL_PROPAGATE_FAULT) {
+                           &ctxt->exception);
+       if (ret == X86EMUL_PROPAGATE_FAULT)
                /* FIXME: need to provide precise fault address */
-               emulate_pf(ctxt);
                return ret;
-       }
 
        if (old_tss_sel != 0xffff) {
                tss_seg.prev_task_link = old_tss_sel;
@@ -2106,12 +2081,10 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                ret = ops->write_std(new_tss_base,
                                     &tss_seg.prev_task_link,
                                     sizeof tss_seg.prev_task_link,
-                                    ctxt->vcpu, &err);
-               if (ret == X86EMUL_PROPAGATE_FAULT) {
+                                    ctxt->vcpu, &ctxt->exception);
+               if (ret == X86EMUL_PROPAGATE_FAULT)
                        /* FIXME: need to provide precise fault address */
-                       emulate_pf(ctxt);
                        return ret;
-               }
        }
 
        return load_state_from_tss32(ctxt, ops, &tss_seg);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0c908321e900..8311ed909c49 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3642,24 +3642,31 @@ gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
        return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, gva, 0, error);
 }
 
+static int make_page_fault(struct x86_exception *exception, u32 error)
+{
+       exception->vector = PF_VECTOR;
+       exception->error_code_valid = true;
+       exception->error_code = error;
+       return X86EMUL_PROPAGATE_FAULT;
+}
+
 static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
                                      struct kvm_vcpu *vcpu, u32 access,
-                                     u32 *error)
+                                     struct x86_exception *exception)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
+       u32 error;
 
        while (bytes) {
                gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access,
-                                                           error);
+                                                           &error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return make_page_fault(exception, error);
                ret = kvm_read_guest(vcpu->kvm, gpa, data, toread);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@ -3676,47 +3683,50 @@ out:
 
 /* used for instruction fetching */
 static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                               struct kvm_vcpu *vcpu, u32 *error)
+                               struct kvm_vcpu *vcpu,
+                               struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
-                                         access | PFERR_FETCH_MASK, error);
+                                         access | PFERR_FETCH_MASK,
+                                         exception);
 }
 
 static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                              struct kvm_vcpu *vcpu, u32 *error)
+                              struct kvm_vcpu *vcpu,
+                              struct x86_exception *exception)
 {
        u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
        return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
-                                         error);
+                                         exception);
 }
 
 static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
-                                     struct kvm_vcpu *vcpu, u32 *error)
+                                     struct kvm_vcpu *vcpu,
+                                     struct x86_exception *exception)
 {
-       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+       return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
 }
 
 static int kvm_write_guest_virt_system(gva_t addr, void *val,
                                       unsigned int bytes,
                                       struct kvm_vcpu *vcpu,
-                                      u32 *error)
+                                      struct x86_exception *exception)
 {
        void *data = val;
        int r = X86EMUL_CONTINUE;
+       u32 error;
 
        while (bytes) {
                gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr,
                                                            PFERR_WRITE_MASK,
-                                                           error);
+                                                           &error);
                unsigned offset = addr & (PAGE_SIZE-1);
                unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
                int ret;
 
-               if (gpa == UNMAPPED_GVA) {
-                       r = X86EMUL_PROPAGATE_FAULT;
-                       goto out;
-               }
+               if (gpa == UNMAPPED_GVA)
+                       return make_page_fault(exception, error);
                ret = kvm_write_guest(vcpu->kvm, gpa, data, towrite);
                if (ret < 0) {
                        r = X86EMUL_IO_NEEDED;
@@ -3734,10 +3744,11 @@ out:
 static int emulator_read_emulated(unsigned long addr,
                                  void *val,
                                  unsigned int bytes,
-                                 unsigned int *error_code,
+                                 struct x86_exception *exception,
                                  struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
+       u32 error_code;
 
        if (vcpu->mmio_read_completed) {
                memcpy(val, vcpu->mmio_data, bytes);
@@ -3747,17 +3758,17 @@ static int emulator_read_emulated(unsigned long addr,
                return X86EMUL_CONTINUE;
        }
 
-       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
 
        if (gpa == UNMAPPED_GVA)
-               return X86EMUL_PROPAGATE_FAULT;
+               return make_page_fault(exception, error_code);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
                goto mmio;
 
-       if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
+       if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception)
            == X86EMUL_CONTINUE)
                return X86EMUL_CONTINUE;
 
 mmio:
@@ -3781,7 +3792,7 @@ mmio:
 }
 
 int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
-                         const void *val, int bytes)
+                       const void *val, int bytes)
 {
        int ret;
 
@@ -3795,15 +3806,16 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 static int emulator_write_emulated_onepage(unsigned long addr,
                                           const void *val,
                                           unsigned int bytes,
-                                          unsigned int *error_code,
+                                          struct x86_exception *exception,
                                           struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
+       u32 error_code;
 
-       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error_code);
+       gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
 
        if (gpa == UNMAPPED_GVA)
-               return X86EMUL_PROPAGATE_FAULT;
+               return make_page_fault(exception, error_code);
 
        /* For APIC access vmexit */
        if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3833,7 +3845,7 @@ mmio:
 int emulator_write_emulated(unsigned long addr,
                            const void *val,
                            unsigned int bytes,
-                           unsigned int *error_code,
+                           struct x86_exception *exception,
                            struct kvm_vcpu *vcpu)
 {
        /* Crossing a page boundary? */
@@ -3841,7 +3853,7 @@ int emulator_write_emulated(unsigned long addr,
                int rc, now;
 
                now = -addr & ~PAGE_MASK;
-               rc = emulator_write_emulated_onepage(addr, val, now, error_code,
+               rc = emulator_write_emulated_onepage(addr, val, now, exception,
                                                     vcpu);
                if (rc != X86EMUL_CONTINUE)
                        return rc;
@@ -3849,7 +3861,7 @@ int emulator_write_emulated(unsigned long addr,
                val += now;
                bytes -= now;
        }
-       return emulator_write_emulated_onepage(addr, val, bytes, error_code,
+       return emulator_write_emulated_onepage(addr, val, bytes, exception,
                                               vcpu);
 }
 
@@ -3867,7 +3879,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
                                     const void *old,
                                     const void *new,
                                     unsigned int bytes,
-                                    unsigned int *error_code,
+                                    struct x86_exception *exception,
                                     struct kvm_vcpu *vcpu)
 {
        gpa_t gpa;
@@ -3925,7 +3937,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 emul_write:
        printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
 
-       return emulator_write_emulated(addr, new, bytes, error_code, vcpu);
+       return emulator_write_emulated(addr, new, bytes, exception, vcpu);
 }
 
 static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)