author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>    2012-10-24 02:07:59 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>                2012-10-31 18:36:30 -0400
commit     87da7e66a40532b743cd50972fcf85a1f15b14ea (patch)
tree       e553258648881176136774013f0fecff876644bb /arch
parent     35fd3dc58da675d659513384221349ef90749a01 (diff)
KVM: x86: fix vcpu->mmio_fragments overflow
After commit b3356bf0dbb349 (KVM: emulator: optimize "rep ins" handling),
pieces of I/O data can be collected and written to guest memory or MMIO
together.

Unfortunately, KVM splits each MMIO access into 8-byte pieces and stores
them in vcpu->mmio_fragments. If the guest uses "rep ins" to move a large
amount of data, vcpu->mmio_fragments overflows.
The bug can be triggered with the isapc machine type (-M isapc):
[23154.818733] general protection fault: 0000 [#1] SMP DEBUG_PAGEALLOC
[ ......]
[23154.858083] Call Trace:
[23154.859874] [<ffffffffa04f0e17>] kvm_get_cr8+0x1d/0x28 [kvm]
[23154.861677] [<ffffffffa04fa6d4>] kvm_arch_vcpu_ioctl_run+0xcda/0xe45 [kvm]
[23154.863604] [<ffffffffa04f5a1a>] ? kvm_arch_vcpu_load+0x17b/0x180 [kvm]
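The arithmetic behind the overflow can be shown with a minimal userspace
sketch of the pre-patch splitting (a sketch only, not kernel code; the
2-slot array size and the 1024-byte transfer are illustrative assumptions,
not values taken from the kernel):

/*
 * Mimics the old splitting loop to show how fast the fragment array
 * fills up: one fragment is queued per 8-byte piece of the access.
 */
#include <stdio.h>

#define PIECE_SIZE   8u    /* bytes copied per userspace exit          */
#define ARRAY_SLOTS  2u    /* assumed size of vcpu->mmio_fragments     */

int main(void)
{
	unsigned bytes = 1024;	/* a large "rep ins" transfer */
	unsigned fragments = 0;

	while (bytes) {
		unsigned now = bytes < PIECE_SIZE ? bytes : PIECE_SIZE;

		fragments++;	/* old code: one fragment per piece */
		bytes -= now;
	}

	printf("need %u fragments, have %u slots\n", fragments, ARRAY_SLOTS);
	return 0;
}

With 1024 bytes the loop wants 128 slots, and everything written past the
end of vcpu->mmio_fragments corrupts the vcpu structure, which is what the
fault above shows.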
Instead, we can use a single mmio_fragment to store a large MMIO access and
split it only when we pass the mmio-exit info to userspace. After that, at
most two entries are needed to hold the MMIO info for an access that crosses
MMIO pages.
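A rough model of the new scheme, again as a sketch rather than the kernel
code (struct mmio_fragment below is a simplified stand-in for
struct kvm_mmio_fragment): the whole access sits in one fragment, and
8-byte pieces are peeled off only when each piece is handed to userspace.

#include <string.h>

struct mmio_fragment {
	unsigned long gpa;	/* guest physical address of the next piece */
	unsigned char *data;	/* backing buffer for the access            */
	unsigned len;		/* bytes still to be transferred            */
};

/*
 * Copy the next piece (at most 8 bytes) into the exit buffer and advance
 * the fragment, mirroring the bookkeeping added to complete_emulated_mmio().
 * Returns the number of bytes placed in exit_buf.
 */
static unsigned next_piece(struct mmio_fragment *frag, unsigned char exit_buf[8])
{
	unsigned len = frag->len < 8 ? frag->len : 8;

	memcpy(exit_buf, frag->data, len);

	if (frag->len <= 8) {
		frag->len = 0;		/* fragment fully consumed */
	} else {
		frag->data += len;	/* go forward to the next piece */
		frag->gpa += len;
		frag->len -= len;
	}
	return len;
}

Because the splitting now happens per exit rather than up front, an access
that crosses an MMIO page boundary needs at most two fragments, one per
page, no matter how many bytes "rep ins" moves.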
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kvm/x86.c   60
1 file changed, 34 insertions(+), 26 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1eefebe5d727..224a7e78cb6c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3779,7 +3779,7 @@ static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
 {
 	struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
 
-	memcpy(vcpu->run->mmio.data, frag->data, frag->len);
+	memcpy(vcpu->run->mmio.data, frag->data, min(8u, frag->len));
 	return X86EMUL_CONTINUE;
 }
 
@@ -3832,18 +3832,11 @@ mmio:
 	bytes -= handled;
 	val += handled;
 
-	while (bytes) {
-		unsigned now = min(bytes, 8U);
-
-		frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
-		frag->gpa = gpa;
-		frag->data = val;
-		frag->len = now;
-
-		gpa += now;
-		val += now;
-		bytes -= now;
-	}
+	WARN_ON(vcpu->mmio_nr_fragments >= KVM_MAX_MMIO_FRAGMENTS);
+	frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
+	frag->gpa = gpa;
+	frag->data = val;
+	frag->len = bytes;
 	return X86EMUL_CONTINUE;
 }
 
@@ -3890,7 +3883,7 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
 	vcpu->mmio_needed = 1;
 	vcpu->mmio_cur_fragment = 0;
 
-	vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
+	vcpu->run->mmio.len = min(8u, vcpu->mmio_fragments[0].len);
 	vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
 	vcpu->run->exit_reason = KVM_EXIT_MMIO;
 	vcpu->run->mmio.phys_addr = gpa;
@@ -5522,28 +5515,44 @@ static int complete_emulated_pio(struct kvm_vcpu *vcpu)
  *
  * read:
  *   for each fragment
- *     write gpa, len
- *     exit
- *     copy data
+ *     for each mmio piece in the fragment
+ *       write gpa, len
+ *       exit
+ *       copy data
  *   execute insn
  *
  * write:
  *   for each fragment
- *     write gpa, len
- *     copy data
- *     exit
+ *     for each mmio piece in the fragment
+ *       write gpa, len
+ *       copy data
+ *       exit
  */
 static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 {
 	struct kvm_run *run = vcpu->run;
 	struct kvm_mmio_fragment *frag;
+	unsigned len;
 
 	BUG_ON(!vcpu->mmio_needed);
 
 	/* Complete previous fragment */
-	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
+	frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment];
+	len = min(8u, frag->len);
 	if (!vcpu->mmio_is_write)
-		memcpy(frag->data, run->mmio.data, frag->len);
+		memcpy(frag->data, run->mmio.data, len);
+
+	if (frag->len <= 8) {
+		/* Switch to the next fragment. */
+		frag++;
+		vcpu->mmio_cur_fragment++;
+	} else {
+		/* Go forward to the next mmio piece. */
+		frag->data += len;
+		frag->gpa += len;
+		frag->len -= len;
+	}
+
 	if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
 		vcpu->mmio_needed = 0;
 		if (vcpu->mmio_is_write)
@@ -5551,13 +5560,12 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 		vcpu->mmio_read_completed = 1;
 		return complete_emulated_io(vcpu);
 	}
-	/* Initiate next fragment */
-	++frag;
+
 	run->exit_reason = KVM_EXIT_MMIO;
 	run->mmio.phys_addr = frag->gpa;
 	if (vcpu->mmio_is_write)
-		memcpy(run->mmio.data, frag->data, frag->len);
-	run->mmio.len = frag->len;
+		memcpy(run->mmio.data, frag->data, min(8u, frag->len));
+	run->mmio.len = min(8u, frag->len);
 	run->mmio.is_write = vcpu->mmio_is_write;
 	vcpu->arch.complete_userspace_io = complete_emulated_mmio;
 	return 0;
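Putting the pieces together, a hedged model of the exit flow described by
the updated comment above ("for each fragment / for each mmio piece").
handle_mmio_exit() is a hypothetical stand-in for the KVM_EXIT_MMIO round
trip to userspace, and struct mmio_fragment / next_piece() come from the
sketch earlier in this message; the real driver of this loop is
complete_emulated_mmio() being re-entered after each exit.

/* Hypothetical userspace-side handler; in KVM this is one KVM_EXIT_MMIO exit. */
static void handle_mmio_exit(unsigned long gpa, const unsigned char *data,
			     unsigned len)
{
	(void)gpa; (void)data; (void)len;	/* a real VMM would access the device here */
}

/* Drain every fragment in 8-byte pieces, as the new comment describes. */
static void drain_fragments(struct mmio_fragment *frags, unsigned nr)
{
	unsigned char exit_buf[8];
	unsigned i;

	for (i = 0; i < nr; i++) {
		struct mmio_fragment *frag = &frags[i];

		while (frag->len) {
			unsigned long gpa = frag->gpa;	/* gpa of this piece */
			unsigned len = next_piece(frag, exit_buf);

			handle_mmio_exit(gpa, exit_buf, len);
		}
	}
}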