aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2011-04-20 06:37:53 -0400
committerAvi Kivity <avi@redhat.com>2011-05-11 07:57:10 -0400
commit0f65dd70a442ff498da10cec0a599fbd9d2d6f9e (patch)
treea460005086619e5959242404c8cd7be0d766de3d /arch
parent7295261cdd42e6d41666df38d1b613cdd9e95f46 (diff)
KVM: x86 emulator: drop vcpu argument from memory read/write callbacks
Making the emulator caller-agnostic. Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/include/asm/kvm_emulate.h34
-rw-r--r--arch/x86/kvm/emulate.c54
-rw-r--r--arch/x86/kvm/x86.c54
3 files changed, 75 insertions(+), 67 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 9b760c8f2576..b4d846708a4b 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -92,8 +92,9 @@ struct x86_emulate_ops {
92 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 92 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
93 * @bytes: [IN ] Number of bytes to read from memory. 93 * @bytes: [IN ] Number of bytes to read from memory.
94 */ 94 */
95 int (*read_std)(unsigned long addr, void *val, 95 int (*read_std)(struct x86_emulate_ctxt *ctxt,
96 unsigned int bytes, struct kvm_vcpu *vcpu, 96 unsigned long addr, void *val,
97 unsigned int bytes,
97 struct x86_exception *fault); 98 struct x86_exception *fault);
98 99
99 /* 100 /*
@@ -103,8 +104,8 @@ struct x86_emulate_ops {
103 * @val: [OUT] Value write to memory, zero-extended to 'u_long'. 104 * @val: [OUT] Value write to memory, zero-extended to 'u_long'.
104 * @bytes: [IN ] Number of bytes to write to memory. 105 * @bytes: [IN ] Number of bytes to write to memory.
105 */ 106 */
106 int (*write_std)(unsigned long addr, void *val, 107 int (*write_std)(struct x86_emulate_ctxt *ctxt,
107 unsigned int bytes, struct kvm_vcpu *vcpu, 108 unsigned long addr, void *val, unsigned int bytes,
108 struct x86_exception *fault); 109 struct x86_exception *fault);
109 /* 110 /*
110 * fetch: Read bytes of standard (non-emulated/special) memory. 111 * fetch: Read bytes of standard (non-emulated/special) memory.
@@ -113,8 +114,8 @@ struct x86_emulate_ops {
113 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 114 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
114 * @bytes: [IN ] Number of bytes to read from memory. 115 * @bytes: [IN ] Number of bytes to read from memory.
115 */ 116 */
116 int (*fetch)(unsigned long addr, void *val, 117 int (*fetch)(struct x86_emulate_ctxt *ctxt,
117 unsigned int bytes, struct kvm_vcpu *vcpu, 118 unsigned long addr, void *val, unsigned int bytes,
118 struct x86_exception *fault); 119 struct x86_exception *fault);
119 120
120 /* 121 /*
@@ -123,11 +124,9 @@ struct x86_emulate_ops {
123 * @val: [OUT] Value read from memory, zero-extended to 'u_long'. 124 * @val: [OUT] Value read from memory, zero-extended to 'u_long'.
124 * @bytes: [IN ] Number of bytes to read from memory. 125 * @bytes: [IN ] Number of bytes to read from memory.
125 */ 126 */
126 int (*read_emulated)(unsigned long addr, 127 int (*read_emulated)(struct x86_emulate_ctxt *ctxt,
127 void *val, 128 unsigned long addr, void *val, unsigned int bytes,
128 unsigned int bytes, 129 struct x86_exception *fault);
129 struct x86_exception *fault,
130 struct kvm_vcpu *vcpu);
131 130
132 /* 131 /*
133 * write_emulated: Write bytes to emulated/special memory area. 132 * write_emulated: Write bytes to emulated/special memory area.
@@ -136,11 +135,10 @@ struct x86_emulate_ops {
136 * required). 135 * required).
137 * @bytes: [IN ] Number of bytes to write to memory. 136 * @bytes: [IN ] Number of bytes to write to memory.
138 */ 137 */
139 int (*write_emulated)(unsigned long addr, 138 int (*write_emulated)(struct x86_emulate_ctxt *ctxt,
140 const void *val, 139 unsigned long addr, const void *val,
141 unsigned int bytes, 140 unsigned int bytes,
142 struct x86_exception *fault, 141 struct x86_exception *fault);
143 struct kvm_vcpu *vcpu);
144 142
145 /* 143 /*
146 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an 144 * cmpxchg_emulated: Emulate an atomic (LOCKed) CMPXCHG operation on an
@@ -150,12 +148,12 @@ struct x86_emulate_ops {
150 * @new: [IN ] Value to write to @addr. 148 * @new: [IN ] Value to write to @addr.
151 * @bytes: [IN ] Number of bytes to access using CMPXCHG. 149 * @bytes: [IN ] Number of bytes to access using CMPXCHG.
152 */ 150 */
153 int (*cmpxchg_emulated)(unsigned long addr, 151 int (*cmpxchg_emulated)(struct x86_emulate_ctxt *ctxt,
152 unsigned long addr,
154 const void *old, 153 const void *old,
155 const void *new, 154 const void *new,
156 unsigned int bytes, 155 unsigned int bytes,
157 struct x86_exception *fault, 156 struct x86_exception *fault);
158 struct kvm_vcpu *vcpu);
159 157
160 int (*pio_in_emulated)(int size, unsigned short port, void *val, 158 int (*pio_in_emulated)(int size, unsigned short port, void *val,
161 unsigned int count, struct kvm_vcpu *vcpu); 159 unsigned int count, struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4a5b61ff0ae9..ff64b17df772 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -645,8 +645,7 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
645 rc = linearize(ctxt, addr, size, false, &linear); 645 rc = linearize(ctxt, addr, size, false, &linear);
646 if (rc != X86EMUL_CONTINUE) 646 if (rc != X86EMUL_CONTINUE)
647 return rc; 647 return rc;
648 return ctxt->ops->read_std(linear, data, size, ctxt->vcpu, 648 return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
649 &ctxt->exception);
650} 649}
651 650
652static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, 651static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -665,8 +664,8 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
665 rc = __linearize(ctxt, addr, size, false, true, &linear); 664 rc = __linearize(ctxt, addr, size, false, true, &linear);
666 if (rc != X86EMUL_CONTINUE) 665 if (rc != X86EMUL_CONTINUE)
667 return rc; 666 return rc;
668 rc = ops->fetch(linear, fc->data + cur_size, 667 rc = ops->fetch(ctxt, linear, fc->data + cur_size,
669 size, ctxt->vcpu, &ctxt->exception); 668 size, &ctxt->exception);
670 if (rc != X86EMUL_CONTINUE) 669 if (rc != X86EMUL_CONTINUE)
671 return rc; 670 return rc;
672 fc->end += size; 671 fc->end += size;
@@ -1047,8 +1046,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
1047 if (mc->pos < mc->end) 1046 if (mc->pos < mc->end)
1048 goto read_cached; 1047 goto read_cached;
1049 1048
1050 rc = ops->read_emulated(addr, mc->data + mc->end, n, 1049 rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
1051 &ctxt->exception, ctxt->vcpu); 1050 &ctxt->exception);
1052 if (rc != X86EMUL_CONTINUE) 1051 if (rc != X86EMUL_CONTINUE)
1053 return rc; 1052 return rc;
1054 mc->end += n; 1053 mc->end += n;
@@ -1087,8 +1086,8 @@ static int segmented_write(struct x86_emulate_ctxt *ctxt,
1087 rc = linearize(ctxt, addr, size, true, &linear); 1086 rc = linearize(ctxt, addr, size, true, &linear);
1088 if (rc != X86EMUL_CONTINUE) 1087 if (rc != X86EMUL_CONTINUE)
1089 return rc; 1088 return rc;
1090 return ctxt->ops->write_emulated(linear, data, size, 1089 return ctxt->ops->write_emulated(ctxt, linear, data, size,
1091 &ctxt->exception, ctxt->vcpu); 1090 &ctxt->exception);
1092} 1091}
1093 1092
1094static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, 1093static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
@@ -1102,8 +1101,8 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
1102 rc = linearize(ctxt, addr, size, true, &linear); 1101 rc = linearize(ctxt, addr, size, true, &linear);
1103 if (rc != X86EMUL_CONTINUE) 1102 if (rc != X86EMUL_CONTINUE)
1104 return rc; 1103 return rc;
1105 return ctxt->ops->cmpxchg_emulated(linear, orig_data, data, 1104 return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
1106 size, &ctxt->exception, ctxt->vcpu); 1105 size, &ctxt->exception);
1107} 1106}
1108 1107
1109static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, 1108static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
@@ -1168,8 +1167,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1168 if (dt.size < index * 8 + 7) 1167 if (dt.size < index * 8 + 7)
1169 return emulate_gp(ctxt, selector & 0xfffc); 1168 return emulate_gp(ctxt, selector & 0xfffc);
1170 addr = dt.address + index * 8; 1169 addr = dt.address + index * 8;
1171 ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, 1170 ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
1172 &ctxt->exception);
1173 1171
1174 return ret; 1172 return ret;
1175} 1173}
@@ -1190,8 +1188,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1190 return emulate_gp(ctxt, selector & 0xfffc); 1188 return emulate_gp(ctxt, selector & 0xfffc);
1191 1189
1192 addr = dt.address + index * 8; 1190 addr = dt.address + index * 8;
1193 ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, 1191 ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
1194 &ctxt->exception);
1195 1192
1196 return ret; 1193 return ret;
1197} 1194}
@@ -1545,11 +1542,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
1545 eip_addr = dt.address + (irq << 2); 1542 eip_addr = dt.address + (irq << 2);
1546 cs_addr = dt.address + (irq << 2) + 2; 1543 cs_addr = dt.address + (irq << 2) + 2;
1547 1544
1548 rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception); 1545 rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
1549 if (rc != X86EMUL_CONTINUE) 1546 if (rc != X86EMUL_CONTINUE)
1550 return rc; 1547 return rc;
1551 1548
1552 rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception); 1549 rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
1553 if (rc != X86EMUL_CONTINUE) 1550 if (rc != X86EMUL_CONTINUE)
1554 return rc; 1551 return rc;
1555 1552
@@ -2036,13 +2033,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
2036#ifdef CONFIG_X86_64 2033#ifdef CONFIG_X86_64
2037 base |= ((u64)base3) << 32; 2034 base |= ((u64)base3) << 32;
2038#endif 2035#endif
2039 r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL); 2036 r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL);
2040 if (r != X86EMUL_CONTINUE) 2037 if (r != X86EMUL_CONTINUE)
2041 return false; 2038 return false;
2042 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) 2039 if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
2043 return false; 2040 return false;
2044 r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu, 2041 r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL);
2045 NULL);
2046 if (r != X86EMUL_CONTINUE) 2042 if (r != X86EMUL_CONTINUE)
2047 return false; 2043 return false;
2048 if ((perm >> bit_idx) & mask) 2044 if ((perm >> bit_idx) & mask)
@@ -2150,7 +2146,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2150 int ret; 2146 int ret;
2151 u32 new_tss_base = get_desc_base(new_desc); 2147 u32 new_tss_base = get_desc_base(new_desc);
2152 2148
2153 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2149 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2154 &ctxt->exception); 2150 &ctxt->exception);
2155 if (ret != X86EMUL_CONTINUE) 2151 if (ret != X86EMUL_CONTINUE)
2156 /* FIXME: need to provide precise fault address */ 2152 /* FIXME: need to provide precise fault address */
@@ -2158,13 +2154,13 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2158 2154
2159 save_state_to_tss16(ctxt, ops, &tss_seg); 2155 save_state_to_tss16(ctxt, ops, &tss_seg);
2160 2156
2161 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2157 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2162 &ctxt->exception); 2158 &ctxt->exception);
2163 if (ret != X86EMUL_CONTINUE) 2159 if (ret != X86EMUL_CONTINUE)
2164 /* FIXME: need to provide precise fault address */ 2160 /* FIXME: need to provide precise fault address */
2165 return ret; 2161 return ret;
2166 2162
2167 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2163 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2168 &ctxt->exception); 2164 &ctxt->exception);
2169 if (ret != X86EMUL_CONTINUE) 2165 if (ret != X86EMUL_CONTINUE)
2170 /* FIXME: need to provide precise fault address */ 2166 /* FIXME: need to provide precise fault address */
@@ -2173,10 +2169,10 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
2173 if (old_tss_sel != 0xffff) { 2169 if (old_tss_sel != 0xffff) {
2174 tss_seg.prev_task_link = old_tss_sel; 2170 tss_seg.prev_task_link = old_tss_sel;
2175 2171
2176 ret = ops->write_std(new_tss_base, 2172 ret = ops->write_std(ctxt, new_tss_base,
2177 &tss_seg.prev_task_link, 2173 &tss_seg.prev_task_link,
2178 sizeof tss_seg.prev_task_link, 2174 sizeof tss_seg.prev_task_link,
2179 ctxt->vcpu, &ctxt->exception); 2175 &ctxt->exception);
2180 if (ret != X86EMUL_CONTINUE) 2176 if (ret != X86EMUL_CONTINUE)
2181 /* FIXME: need to provide precise fault address */ 2177 /* FIXME: need to provide precise fault address */
2182 return ret; 2178 return ret;
@@ -2282,7 +2278,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2282 int ret; 2278 int ret;
2283 u32 new_tss_base = get_desc_base(new_desc); 2279 u32 new_tss_base = get_desc_base(new_desc);
2284 2280
2285 ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2281 ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2286 &ctxt->exception); 2282 &ctxt->exception);
2287 if (ret != X86EMUL_CONTINUE) 2283 if (ret != X86EMUL_CONTINUE)
2288 /* FIXME: need to provide precise fault address */ 2284 /* FIXME: need to provide precise fault address */
@@ -2290,13 +2286,13 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2290 2286
2291 save_state_to_tss32(ctxt, ops, &tss_seg); 2287 save_state_to_tss32(ctxt, ops, &tss_seg);
2292 2288
2293 ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2289 ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
2294 &ctxt->exception); 2290 &ctxt->exception);
2295 if (ret != X86EMUL_CONTINUE) 2291 if (ret != X86EMUL_CONTINUE)
2296 /* FIXME: need to provide precise fault address */ 2292 /* FIXME: need to provide precise fault address */
2297 return ret; 2293 return ret;
2298 2294
2299 ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, 2295 ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
2300 &ctxt->exception); 2296 &ctxt->exception);
2301 if (ret != X86EMUL_CONTINUE) 2297 if (ret != X86EMUL_CONTINUE)
2302 /* FIXME: need to provide precise fault address */ 2298 /* FIXME: need to provide precise fault address */
@@ -2305,10 +2301,10 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
2305 if (old_tss_sel != 0xffff) { 2301 if (old_tss_sel != 0xffff) {
2306 tss_seg.prev_task_link = old_tss_sel; 2302 tss_seg.prev_task_link = old_tss_sel;
2307 2303
2308 ret = ops->write_std(new_tss_base, 2304 ret = ops->write_std(ctxt, new_tss_base,
2309 &tss_seg.prev_task_link, 2305 &tss_seg.prev_task_link,
2310 sizeof tss_seg.prev_task_link, 2306 sizeof tss_seg.prev_task_link,
2311 ctxt->vcpu, &ctxt->exception); 2307 &ctxt->exception);
2312 if (ret != X86EMUL_CONTINUE) 2308 if (ret != X86EMUL_CONTINUE)
2313 /* FIXME: need to provide precise fault address */ 2309 /* FIXME: need to provide precise fault address */
2314 return ret; 2310 return ret;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6aa137701cda..274652ae6d52 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -63,6 +63,9 @@
63#define KVM_MAX_MCE_BANKS 32 63#define KVM_MAX_MCE_BANKS 32
64#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P) 64#define KVM_MCE_CAP_SUPPORTED (MCG_CTL_P | MCG_SER_P)
65 65
66#define emul_to_vcpu(ctxt) \
67 container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)
68
66/* EFER defaults: 69/* EFER defaults:
67 * - enable syscall per default because its emulated by KVM 70 * - enable syscall per default because its emulated by KVM
68 * - enable LME and LMA per default on 64 bit KVM 71 * - enable LME and LMA per default on 64 bit KVM
@@ -3760,37 +3763,43 @@ out:
3760} 3763}
3761 3764
3762/* used for instruction fetching */ 3765/* used for instruction fetching */
3763static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes, 3766static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt,
3764 struct kvm_vcpu *vcpu, 3767 gva_t addr, void *val, unsigned int bytes,
3765 struct x86_exception *exception) 3768 struct x86_exception *exception)
3766{ 3769{
3770 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3767 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3771 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3772
3768 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 3773 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
3769 access | PFERR_FETCH_MASK, 3774 access | PFERR_FETCH_MASK,
3770 exception); 3775 exception);
3771} 3776}
3772 3777
3773static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes, 3778static int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
3774 struct kvm_vcpu *vcpu, 3779 gva_t addr, void *val, unsigned int bytes,
3775 struct x86_exception *exception) 3780 struct x86_exception *exception)
3776{ 3781{
3782 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3777 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; 3783 u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
3784
3778 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access, 3785 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
3779 exception); 3786 exception);
3780} 3787}
3781 3788
3782static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes, 3789static int kvm_read_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3783 struct kvm_vcpu *vcpu, 3790 gva_t addr, void *val, unsigned int bytes,
3784 struct x86_exception *exception) 3791 struct x86_exception *exception)
3785{ 3792{
3793 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3786 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception); 3794 return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, exception);
3787} 3795}
3788 3796
3789static int kvm_write_guest_virt_system(gva_t addr, void *val, 3797static int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
3798 gva_t addr, void *val,
3790 unsigned int bytes, 3799 unsigned int bytes,
3791 struct kvm_vcpu *vcpu,
3792 struct x86_exception *exception) 3800 struct x86_exception *exception)
3793{ 3801{
3802 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3794 void *data = val; 3803 void *data = val;
3795 int r = X86EMUL_CONTINUE; 3804 int r = X86EMUL_CONTINUE;
3796 3805
@@ -3818,12 +3827,13 @@ out:
3818 return r; 3827 return r;
3819} 3828}
3820 3829
3821static int emulator_read_emulated(unsigned long addr, 3830static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
3831 unsigned long addr,
3822 void *val, 3832 void *val,
3823 unsigned int bytes, 3833 unsigned int bytes,
3824 struct x86_exception *exception, 3834 struct x86_exception *exception)
3825 struct kvm_vcpu *vcpu)
3826{ 3835{
3836 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3827 gpa_t gpa; 3837 gpa_t gpa;
3828 int handled; 3838 int handled;
3829 3839
@@ -3844,7 +3854,7 @@ static int emulator_read_emulated(unsigned long addr,
3844 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE) 3854 if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
3845 goto mmio; 3855 goto mmio;
3846 3856
3847 if (kvm_read_guest_virt(addr, val, bytes, vcpu, exception) 3857 if (kvm_read_guest_virt(ctxt, addr, val, bytes, exception)
3848 == X86EMUL_CONTINUE) 3858 == X86EMUL_CONTINUE)
3849 return X86EMUL_CONTINUE; 3859 return X86EMUL_CONTINUE;
3850 3860
@@ -3933,12 +3943,14 @@ mmio:
3933 return X86EMUL_CONTINUE; 3943 return X86EMUL_CONTINUE;
3934} 3944}
3935 3945
3936int emulator_write_emulated(unsigned long addr, 3946int emulator_write_emulated(struct x86_emulate_ctxt *ctxt,
3947 unsigned long addr,
3937 const void *val, 3948 const void *val,
3938 unsigned int bytes, 3949 unsigned int bytes,
3939 struct x86_exception *exception, 3950 struct x86_exception *exception)
3940 struct kvm_vcpu *vcpu)
3941{ 3951{
3952 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3953
3942 /* Crossing a page boundary? */ 3954 /* Crossing a page boundary? */
3943 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 3955 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3944 int rc, now; 3956 int rc, now;
@@ -3966,13 +3978,14 @@ int emulator_write_emulated(unsigned long addr,
3966 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old)) 3978 (cmpxchg64((u64 *)(ptr), *(u64 *)(old), *(u64 *)(new)) == *(u64 *)(old))
3967#endif 3979#endif
3968 3980
3969static int emulator_cmpxchg_emulated(unsigned long addr, 3981static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
3982 unsigned long addr,
3970 const void *old, 3983 const void *old,
3971 const void *new, 3984 const void *new,
3972 unsigned int bytes, 3985 unsigned int bytes,
3973 struct x86_exception *exception, 3986 struct x86_exception *exception)
3974 struct kvm_vcpu *vcpu)
3975{ 3987{
3988 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3976 gpa_t gpa; 3989 gpa_t gpa;
3977 struct page *page; 3990 struct page *page;
3978 char *kaddr; 3991 char *kaddr;
@@ -4028,7 +4041,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
4028emul_write: 4041emul_write:
4029 printk_once(KERN_WARNING "kvm: emulating exchange as write\n"); 4042 printk_once(KERN_WARNING "kvm: emulating exchange as write\n");
4030 4043
4031 return emulator_write_emulated(addr, new, bytes, exception, vcpu); 4044 return emulator_write_emulated(ctxt, addr, new, bytes, exception);
4032} 4045}
4033 4046
4034static int kernel_pio(struct kvm_vcpu *vcpu, void *pd) 4047static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
@@ -5009,7 +5022,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu)
5009 5022
5010 kvm_x86_ops->patch_hypercall(vcpu, instruction); 5023 kvm_x86_ops->patch_hypercall(vcpu, instruction);
5011 5024
5012 return emulator_write_emulated(rip, instruction, 3, NULL, vcpu); 5025 return emulator_write_emulated(&vcpu->arch.emulate_ctxt,
5026 rip, instruction, 3, NULL);
5013} 5027}
5014 5028
5015void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base) 5029void realmode_lgdt(struct kvm_vcpu *vcpu, u16 limit, unsigned long base)