about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorAlexander Graf <agraf@suse.de>2014-06-18 08:53:49 -0400
committerAlexander Graf <agraf@suse.de>2014-07-28 12:30:10 -0400
commitd69614a295aef72f8fb22da8e3ccf1a8f19a7ffc (patch)
treeb8ce894e8738e6711b5593a28a116db5567fe31f
parentc12fb43c2f6d6a57a4e21afe74ff56485d699ee7 (diff)
KVM: PPC: Separate loadstore emulation from priv emulation
Today the instruction emulator can get called via 2 separate code paths. It can either be called by MMIO emulation detection code or by privileged instruction traps. This is bad, as both code paths prepare the environment differently. For MMIO emulation we already know the virtual address we faulted on, so instructions there don't have to actually fetch that information. Split out the two separate use cases into separate files. Signed-off-by: Alexander Graf <agraf@suse.de>
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h1
-rw-r--r--arch/powerpc/kvm/Makefile4
-rw-r--r--arch/powerpc/kvm/emulate.c192
-rw-r--r--arch/powerpc/kvm/emulate_loadstore.c272
-rw-r--r--arch/powerpc/kvm/powerpc.c2
5 files changed, 278 insertions(+), 193 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 17fa277d297e..2214ee61f668 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -86,6 +86,7 @@ extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
86 bool data); 86 bool data);
87extern int kvmppc_emulate_instruction(struct kvm_run *run, 87extern int kvmppc_emulate_instruction(struct kvm_run *run,
88 struct kvm_vcpu *vcpu); 88 struct kvm_vcpu *vcpu);
89extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
89extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 90extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
90extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); 91extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
91extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); 92extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 777f8941a8d5..1ccd7a1a441c 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -13,8 +13,9 @@ common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
13CFLAGS_e500_mmu.o := -I. 13CFLAGS_e500_mmu.o := -I.
14CFLAGS_e500_mmu_host.o := -I. 14CFLAGS_e500_mmu_host.o := -I.
15CFLAGS_emulate.o := -I. 15CFLAGS_emulate.o := -I.
16CFLAGS_emulate_loadstore.o := -I.
16 17
17common-objs-y += powerpc.o emulate.o 18common-objs-y += powerpc.o emulate.o emulate_loadstore.o
18obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o 19obj-$(CONFIG_KVM_EXIT_TIMING) += timing.o
19obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o 20obj-$(CONFIG_KVM_BOOK3S_HANDLER) += book3s_exports.o
20 21
@@ -91,6 +92,7 @@ kvm-book3s_64-module-objs += \
91 $(KVM)/eventfd.o \ 92 $(KVM)/eventfd.o \
92 powerpc.o \ 93 powerpc.o \
93 emulate.o \ 94 emulate.o \
95 emulate_loadstore.o \
94 book3s.o \ 96 book3s.o \
95 book3s_64_vio.o \ 97 book3s_64_vio.o \
96 book3s_rtas.o \ 98 book3s_rtas.o \
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index c5c64b6e7eb2..e96b50d0bdab 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -207,25 +207,12 @@ static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
207 return emulated; 207 return emulated;
208} 208}
209 209
210/* XXX to do:
211 * lhax
212 * lhaux
213 * lswx
214 * lswi
215 * stswx
216 * stswi
217 * lha
218 * lhau
219 * lmw
220 * stmw
221 *
222 */
223/* XXX Should probably auto-generate instruction decoding for a particular core 210/* XXX Should probably auto-generate instruction decoding for a particular core
224 * from opcode tables in the future. */ 211 * from opcode tables in the future. */
225int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 212int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
226{ 213{
227 u32 inst; 214 u32 inst;
228 int ra, rs, rt, sprn; 215 int rs, rt, sprn;
229 enum emulation_result emulated; 216 enum emulation_result emulated;
230 int advance = 1; 217 int advance = 1;
231 218
@@ -238,7 +225,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
238 225
239 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 226 pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
240 227
241 ra = get_ra(inst);
242 rs = get_rs(inst); 228 rs = get_rs(inst);
243 rt = get_rt(inst); 229 rt = get_rt(inst);
244 sprn = get_sprn(inst); 230 sprn = get_sprn(inst);
@@ -270,200 +256,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
270#endif 256#endif
271 advance = 0; 257 advance = 0;
272 break; 258 break;
273 case OP_31_XOP_LWZX:
274 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
275 break;
276
277 case OP_31_XOP_LBZX:
278 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
279 break;
280
281 case OP_31_XOP_LBZUX:
282 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
283 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
284 break;
285
286 case OP_31_XOP_STWX:
287 emulated = kvmppc_handle_store(run, vcpu,
288 kvmppc_get_gpr(vcpu, rs),
289 4, 1);
290 break;
291
292 case OP_31_XOP_STBX:
293 emulated = kvmppc_handle_store(run, vcpu,
294 kvmppc_get_gpr(vcpu, rs),
295 1, 1);
296 break;
297
298 case OP_31_XOP_STBUX:
299 emulated = kvmppc_handle_store(run, vcpu,
300 kvmppc_get_gpr(vcpu, rs),
301 1, 1);
302 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
303 break;
304
305 case OP_31_XOP_LHAX:
306 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
307 break;
308
309 case OP_31_XOP_LHZX:
310 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
311 break;
312
313 case OP_31_XOP_LHZUX:
314 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
315 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
316 break;
317 259
318 case OP_31_XOP_MFSPR: 260 case OP_31_XOP_MFSPR:
319 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); 261 emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
320 break; 262 break;
321 263
322 case OP_31_XOP_STHX:
323 emulated = kvmppc_handle_store(run, vcpu,
324 kvmppc_get_gpr(vcpu, rs),
325 2, 1);
326 break;
327
328 case OP_31_XOP_STHUX:
329 emulated = kvmppc_handle_store(run, vcpu,
330 kvmppc_get_gpr(vcpu, rs),
331 2, 1);
332 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
333 break;
334
335 case OP_31_XOP_MTSPR: 264 case OP_31_XOP_MTSPR:
336 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); 265 emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
337 break; 266 break;
338 267
339 case OP_31_XOP_DCBST:
340 case OP_31_XOP_DCBF:
341 case OP_31_XOP_DCBI:
342 /* Do nothing. The guest is performing dcbi because
343 * hardware DMA is not snooped by the dcache, but
344 * emulated DMA either goes through the dcache as
345 * normal writes, or the host kernel has handled dcache
346 * coherence. */
347 break;
348
349 case OP_31_XOP_LWBRX:
350 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
351 break;
352
353 case OP_31_XOP_TLBSYNC: 268 case OP_31_XOP_TLBSYNC:
354 break; 269 break;
355 270
356 case OP_31_XOP_STWBRX:
357 emulated = kvmppc_handle_store(run, vcpu,
358 kvmppc_get_gpr(vcpu, rs),
359 4, 0);
360 break;
361
362 case OP_31_XOP_LHBRX:
363 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
364 break;
365
366 case OP_31_XOP_STHBRX:
367 emulated = kvmppc_handle_store(run, vcpu,
368 kvmppc_get_gpr(vcpu, rs),
369 2, 0);
370 break;
371
372 default: 271 default:
373 /* Attempt core-specific emulation below. */ 272 /* Attempt core-specific emulation below. */
374 emulated = EMULATE_FAIL; 273 emulated = EMULATE_FAIL;
375 } 274 }
376 break; 275 break;
377 276
378 case OP_LWZ:
379 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
380 break;
381
382 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
383 case OP_LD:
384 rt = get_rt(inst);
385 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
386 break;
387
388 case OP_LWZU:
389 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
390 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
391 break;
392
393 case OP_LBZ:
394 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
395 break;
396
397 case OP_LBZU:
398 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
399 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
400 break;
401
402 case OP_STW:
403 emulated = kvmppc_handle_store(run, vcpu,
404 kvmppc_get_gpr(vcpu, rs),
405 4, 1);
406 break;
407
408 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
409 case OP_STD:
410 rs = get_rs(inst);
411 emulated = kvmppc_handle_store(run, vcpu,
412 kvmppc_get_gpr(vcpu, rs),
413 8, 1);
414 break;
415
416 case OP_STWU:
417 emulated = kvmppc_handle_store(run, vcpu,
418 kvmppc_get_gpr(vcpu, rs),
419 4, 1);
420 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
421 break;
422
423 case OP_STB:
424 emulated = kvmppc_handle_store(run, vcpu,
425 kvmppc_get_gpr(vcpu, rs),
426 1, 1);
427 break;
428
429 case OP_STBU:
430 emulated = kvmppc_handle_store(run, vcpu,
431 kvmppc_get_gpr(vcpu, rs),
432 1, 1);
433 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
434 break;
435
436 case OP_LHZ:
437 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
438 break;
439
440 case OP_LHZU:
441 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
442 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
443 break;
444
445 case OP_LHA:
446 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
447 break;
448
449 case OP_LHAU:
450 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
451 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
452 break;
453
454 case OP_STH:
455 emulated = kvmppc_handle_store(run, vcpu,
456 kvmppc_get_gpr(vcpu, rs),
457 2, 1);
458 break;
459
460 case OP_STHU:
461 emulated = kvmppc_handle_store(run, vcpu,
462 kvmppc_get_gpr(vcpu, rs),
463 2, 1);
464 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
465 break;
466
467 default: 277 default:
468 emulated = EMULATE_FAIL; 278 emulated = EMULATE_FAIL;
469 } 279 }
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
new file mode 100644
index 000000000000..0de4ffa175a9
--- /dev/null
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -0,0 +1,272 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 */
20
21#include <linux/jiffies.h>
22#include <linux/hrtimer.h>
23#include <linux/types.h>
24#include <linux/string.h>
25#include <linux/kvm_host.h>
26#include <linux/clockchips.h>
27
28#include <asm/reg.h>
29#include <asm/time.h>
30#include <asm/byteorder.h>
31#include <asm/kvm_ppc.h>
32#include <asm/disassemble.h>
33#include <asm/ppc-opcode.h>
34#include "timing.h"
35#include "trace.h"
36
37/* XXX to do:
38 * lhax
39 * lhaux
40 * lswx
41 * lswi
42 * stswx
43 * stswi
44 * lha
45 * lhau
46 * lmw
47 * stmw
48 *
49 */
50int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
51{
52 struct kvm_run *run = vcpu->run;
53 u32 inst;
54 int ra, rs, rt;
55 enum emulation_result emulated;
56 int advance = 1;
57
58 /* this default type might be overwritten by subcategories */
59 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
60
61 emulated = kvmppc_get_last_inst(vcpu, false, &inst);
62 if (emulated != EMULATE_DONE)
63 return emulated;
64
65 ra = get_ra(inst);
66 rs = get_rs(inst);
67 rt = get_rt(inst);
68
69 switch (get_op(inst)) {
70 case 31:
71 switch (get_xop(inst)) {
72 case OP_31_XOP_LWZX:
73 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
74 break;
75
76 case OP_31_XOP_LBZX:
77 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
78 break;
79
80 case OP_31_XOP_LBZUX:
81 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
82 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
83 break;
84
85 case OP_31_XOP_STWX:
86 emulated = kvmppc_handle_store(run, vcpu,
87 kvmppc_get_gpr(vcpu, rs),
88 4, 1);
89 break;
90
91 case OP_31_XOP_STBX:
92 emulated = kvmppc_handle_store(run, vcpu,
93 kvmppc_get_gpr(vcpu, rs),
94 1, 1);
95 break;
96
97 case OP_31_XOP_STBUX:
98 emulated = kvmppc_handle_store(run, vcpu,
99 kvmppc_get_gpr(vcpu, rs),
100 1, 1);
101 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
102 break;
103
104 case OP_31_XOP_LHAX:
105 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
106 break;
107
108 case OP_31_XOP_LHZX:
109 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
110 break;
111
112 case OP_31_XOP_LHZUX:
113 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
114 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
115 break;
116
117 case OP_31_XOP_STHX:
118 emulated = kvmppc_handle_store(run, vcpu,
119 kvmppc_get_gpr(vcpu, rs),
120 2, 1);
121 break;
122
123 case OP_31_XOP_STHUX:
124 emulated = kvmppc_handle_store(run, vcpu,
125 kvmppc_get_gpr(vcpu, rs),
126 2, 1);
127 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
128 break;
129
130 case OP_31_XOP_DCBST:
131 case OP_31_XOP_DCBF:
132 case OP_31_XOP_DCBI:
133 /* Do nothing. The guest is performing dcbi because
134 * hardware DMA is not snooped by the dcache, but
135 * emulated DMA either goes through the dcache as
136 * normal writes, or the host kernel has handled dcache
137 * coherence. */
138 break;
139
140 case OP_31_XOP_LWBRX:
141 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
142 break;
143
144 case OP_31_XOP_STWBRX:
145 emulated = kvmppc_handle_store(run, vcpu,
146 kvmppc_get_gpr(vcpu, rs),
147 4, 0);
148 break;
149
150 case OP_31_XOP_LHBRX:
151 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
152 break;
153
154 case OP_31_XOP_STHBRX:
155 emulated = kvmppc_handle_store(run, vcpu,
156 kvmppc_get_gpr(vcpu, rs),
157 2, 0);
158 break;
159
160 default:
161 emulated = EMULATE_FAIL;
162 break;
163 }
164 break;
165
166 case OP_LWZ:
167 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
168 break;
169
170 /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
171 case OP_LD:
172 rt = get_rt(inst);
173 emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
174 break;
175
176 case OP_LWZU:
177 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
178 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
179 break;
180
181 case OP_LBZ:
182 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
183 break;
184
185 case OP_LBZU:
186 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
187 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
188 break;
189
190 case OP_STW:
191 emulated = kvmppc_handle_store(run, vcpu,
192 kvmppc_get_gpr(vcpu, rs),
193 4, 1);
194 break;
195
196 /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
197 case OP_STD:
198 rs = get_rs(inst);
199 emulated = kvmppc_handle_store(run, vcpu,
200 kvmppc_get_gpr(vcpu, rs),
201 8, 1);
202 break;
203
204 case OP_STWU:
205 emulated = kvmppc_handle_store(run, vcpu,
206 kvmppc_get_gpr(vcpu, rs),
207 4, 1);
208 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
209 break;
210
211 case OP_STB:
212 emulated = kvmppc_handle_store(run, vcpu,
213 kvmppc_get_gpr(vcpu, rs),
214 1, 1);
215 break;
216
217 case OP_STBU:
218 emulated = kvmppc_handle_store(run, vcpu,
219 kvmppc_get_gpr(vcpu, rs),
220 1, 1);
221 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
222 break;
223
224 case OP_LHZ:
225 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
226 break;
227
228 case OP_LHZU:
229 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
230 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
231 break;
232
233 case OP_LHA:
234 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
235 break;
236
237 case OP_LHAU:
238 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
239 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
240 break;
241
242 case OP_STH:
243 emulated = kvmppc_handle_store(run, vcpu,
244 kvmppc_get_gpr(vcpu, rs),
245 2, 1);
246 break;
247
248 case OP_STHU:
249 emulated = kvmppc_handle_store(run, vcpu,
250 kvmppc_get_gpr(vcpu, rs),
251 2, 1);
252 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
253 break;
254
255 default:
256 emulated = EMULATE_FAIL;
257 break;
258 }
259
260 if (emulated == EMULATE_FAIL) {
261 advance = 0;
262 kvmppc_core_queue_program(vcpu, 0);
263 }
264
265 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
266
267 /* Advance past emulated instruction. */
268 if (advance)
269 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
270
271 return emulated;
272}
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 544d1d30c8cc..c14ed15fd60b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -272,7 +272,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
272 enum emulation_result er; 272 enum emulation_result er;
273 int r; 273 int r;
274 274
275 er = kvmppc_emulate_instruction(run, vcpu); 275 er = kvmppc_emulate_loadstore(vcpu);
276 switch (er) { 276 switch (er) {
277 case EMULATE_DONE: 277 case EMULATE_DONE:
278 /* Future optimization: only reload non-volatiles if they were 278 /* Future optimization: only reload non-volatiles if they were