author		Paolo Bonzini <pbonzini@redhat.com>	2014-05-15 12:09:29 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-05-22 11:47:17 -0400
commit		5045b468037dfe1c848827ce10e99d87f5669160
tree		a3562a257ed1889b7bf5d6dcd4ecb67edc79fd41 /arch/x86/kvm
parent		fb5e336b977086557739791ed51955c5913dc773
KVM: x86: check CS.DPL against RPL during task switch
Table 7-1 of the SDM specifies a check that the code segment's
DPL must match the selector's RPL. KVM did not perform this
check; fix it.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
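For context on the semantics being enforced: a segment selector carries its
requested privilege level (RPL) in its low two bits, while the descriptor it
references carries a descriptor privilege level (DPL) in its access byte.
During a task switch, Table 7-1 requires the incoming CS selector's RPL to
equal the code segment's DPL. A minimal standalone sketch of that check
follows; the struct layout and helper names here are illustrative, not the
kernel's (in emulate.c the values come from struct desc_struct and the
selector passed to __load_segment_descriptor):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative descriptor: only the fields the check needs. */
struct demo_desc {
	uint8_t type;	/* bit 3 set => code segment */
	uint8_t dpl;	/* descriptor privilege level, 0..3 */
};

/* The RPL is the low two bits of a segment selector. */
static uint8_t selector_rpl(uint16_t selector)
{
	return selector & 3;
}

/*
 * Mirrors the new task-switch check: when CS is loaded as part of a
 * task switch, the selector's RPL must equal the descriptor's DPL,
 * otherwise the load faults (the "goto exception" path in the patch
 * below).
 */
static bool cs_load_ok(uint16_t selector, const struct demo_desc *desc,
		       bool in_task_switch)
{
	if (in_task_switch && selector_rpl(selector) != desc->dpl)
		return false;
	if (!(desc->type & 8))
		return false;	/* not a code segment */
	return true;
}

int main(void)
{
	struct demo_desc cs = { .type = 0x0b, .dpl = 0 };

	/* selector 0x08: RPL 0 == DPL 0, accepted during a task switch */
	/* selector 0x0b: RPL 3 != DPL 0, rejected during a task switch */
	return (cs_load_ok(0x08, &cs, true) &&
		!cs_load_ok(0x0b, &cs, true)) ? 0 : 1;
}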
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/emulate.c	31
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 47e716ef46b7..2fa7ab069817 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1411,7 +1411,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 /* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				     u16 selector, int seg, u8 cpl)
+				     u16 selector, int seg, u8 cpl, bool in_task_switch)
 {
 	struct desc_struct seg_desc, old_desc;
 	u8 dpl, rpl;
@@ -1486,6 +1486,9 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 			goto exception;
 		break;
 	case VCPU_SREG_CS:
+		if (in_task_switch && rpl != dpl)
+			goto exception;
+
 		if (!(seg_desc.type & 8))
 			goto exception;
 
@@ -1547,7 +1550,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
-	return __load_segment_descriptor(ctxt, selector, seg, cpl);
+	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
 }
 
 static void write_register_operand(struct operand *op)
@@ -2440,19 +2443,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2577,25 +2580,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
+	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
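As a design note, the new flag is threaded down from the two task-switch
state loaders rather than checked at each call site: every segment load
performed while switching tasks passes true, and the ordinary
load_segment_descriptor() path passes false, so normal segment loads are
unaffected. A condensed, hypothetical form of the repeated call pattern is
sketched below, assuming it sits inside emulate.c; load_tss_segments and
struct seg_load are not kernel names, just shorthand for what each loader
spells out inline:

/* Hypothetical condensed form of the per-segment pass in the loaders. */
struct seg_load {
	u16 selector;
	int seg;	/* one of the VCPU_SREG_* constants */
};

static int load_tss_segments(struct x86_emulate_ctxt *ctxt,
			     const struct seg_load *loads, int n, u8 cpl)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		/* in_task_switch=true enables the new CS RPL == DPL check */
		ret = __load_segment_descriptor(ctxt, loads[i].selector,
						loads[i].seg, cpl, true);
		if (ret != X86EMUL_CONTINUE)
			return ret;
	}
	return X86EMUL_CONTINUE;
}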