author     Paolo Bonzini <pbonzini@redhat.com>   2015-10-14 09:25:52 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2015-10-14 10:39:52 -0400
commit     b10d92a54dac25a6152f1aa1ffc95c12908035ce
tree       a3b91faba082c51fcd49319ef355e3783494b832
parent     25188b9986cf6b0cadcf1bc1d1693a2e9c50ed47
KVM: x86: fix RSM into 64-bit protected mode
In order to get into 64-bit protected mode, you need to enable
paging while EFER.LMA=1. For this to work, CS.L must be 0.
Currently, we load the segments before CR0 and CR4, which means
that if RSM returns into 64-bit protected mode CS.L is already 1
and everything breaks.
Luckily, CS.L=0 is always the case when executing RSM, because it
is forbidden to execute RSM from 64-bit protected mode. Hence it
is enough to load CR0 and CR4 first, and only then the segments.
Fixes: 660a5d517aaab9187f93854425c4c63f4a09195c
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
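
To make the ordering constraint concrete, here is a minimal, self-contained sketch of the rule the message relies on: a CR0 write that turns paging on while EFER.LME=1 is rejected when CS.L=1 (KVM enforces an equivalent check on its CR0 load path). The type and helper names below are illustrative only, not the kernel's.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative constants and types -- not the kernel's definitions. */
#define X86_CR0_PG	(1ULL << 31)
#define EFER_LME	(1ULL << 8)

struct toy_vcpu {
	uint64_t cr0;
	uint64_t efer;
	bool cs_l;		/* the L bit of the current CS descriptor */
};

/*
 * Returns 0 on success, 1 to signal #GP.  Turning on paging with
 * EFER.LME=1 while CS.L=1 is rejected; this is why restoring the
 * segments (and therefore CS.L=1) before CR0, as the old RSM code
 * did, broke the return into 64-bit protected mode.
 */
static int toy_set_cr0(struct toy_vcpu *vcpu, uint64_t new_cr0)
{
	bool enabling_paging = (new_cr0 & X86_CR0_PG) && !(vcpu->cr0 & X86_CR0_PG);

	if (enabling_paging && (vcpu->efer & EFER_LME) && vcpu->cs_l)
		return 1;	/* cannot activate long mode with CS.L=1 */

	vcpu->cr0 = new_cr0;
	return 0;
}

With the fixed ordering, CR0 and CR4 are restored while the segments in place are still the ones RSM was executed with (CS.L=0, since RSM cannot be executed from 64-bit mode), so the check above passes; the 64-bit segment state is only loaded afterwards.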
 arch/x86/kvm/emulate.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b372a7557c16..9da95b9daf8d 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2418,7 +2418,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	u64 val, cr0, cr4;
 	u32 base3;
 	u16 selector;
-	int i;
+	int i, r;
 
 	for (i = 0; i < 16; i++)
 		*reg_write(ctxt, i) = GET_SMSTATE(u64, smbase, 0x7ff8 - i * 8);
@@ -2460,13 +2460,17 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt, u64 smbase)
 	dt.address = GET_SMSTATE(u64, smbase, 0x7e68);
 	ctxt->ops->set_gdt(ctxt, &dt);
 
+	r = rsm_enter_protected_mode(ctxt, cr0, cr4);
+	if (r != X86EMUL_CONTINUE)
+		return r;
+
 	for (i = 0; i < 6; i++) {
-		int r = rsm_load_seg_64(ctxt, smbase, i);
+		r = rsm_load_seg_64(ctxt, smbase, i);
 		if (r != X86EMUL_CONTINUE)
 			return r;
 	}
 
-	return rsm_enter_protected_mode(ctxt, cr0, cr4);
+	return X86EMUL_CONTINUE;
 }
 
 static int em_rsm(struct x86_emulate_ctxt *ctxt)