author     Avi Kivity <avi@qumranet.com>  2007-03-20 06:46:50 -0400
committer  Avi Kivity <avi@qumranet.com>  2007-05-03 03:52:25 -0400
commit     039576c03c35e2f990ad9bb9c39e1bad3cd60d34 (patch)
tree       fa6c81a40a36d2c0da1cf20c5deb45cb9bd7ba95 /drivers
parent     f0fe510864a4520a85dfa35ae14f5f376c56efc7 (diff)
KVM: Avoid guest virtual addresses in string pio userspace interface
The current string pio interface communicates using guest virtual addresses, relying on userspace to translate addresses and to check permissions. This interface cannot fully support guest smp, as the check needs to take into account two pages at once in case an unaligned string transfer straddles a page boundary.

Change the interface not to communicate guest addresses at all; instead use a buffer page (mmaped by userspace) and do transfers there. The kernel manages the virtual to physical translation and can perform the checks atomically by taking the appropriate locks.

Signed-off-by: Avi Kivity <avi@qumranet.com>
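The new flow is easiest to see from userspace's side. The sketch below is not part of the patch: it shows how a VMM might service a KVM_EXIT_IO exit under the new scheme, where the PIO data lives in a second mmaped page found via run->io.data_offset and userspace only moves bytes to or from its device model, never translating guest addresses. The my_port_io() helper is a stand-in device model, and the code assumes the kvm_run layout of this era (io_completed, io.data_offset) as filled in by kvm_setup_pio() below.

/*
 * Hypothetical userspace sketch (not from this patch): service a
 * KVM_EXIT_IO exit through the mmaped pio data page.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Toy device model: one transaction of 'size' bytes on 'port'. */
static void my_port_io(uint16_t port, void *data, int size, int is_in)
{
	static uint8_t scratch[4];

	if (is_in)
		memcpy(data, scratch, size);	/* guest reads from the device */
	else
		memcpy(scratch, data, size);	/* guest writes to the device */
}

static void handle_io_exit(int vcpu_fd, struct kvm_run *run)
{
	/* The data page sits right after kvm_run in the vcpu mmap;
	 * data_offset is KVM_PIO_PAGE_OFFSET * PAGE_SIZE in this patch. */
	uint8_t *p = (uint8_t *)run + run->io.data_offset;
	uint32_t i;

	for (i = 0; i < run->io.count; ++i, p += run->io.size)
		my_port_io(run->io.port, p, run->io.size,
			   run->io.direction == KVM_EXIT_IO_IN);

	run->io_completed = 1;		/* complete_pio() consumes the result */
	ioctl(vcpu_fd, KVM_RUN, 0);	/* re-enter the guest */
}

Note that userspace never sees a guest address any more: the kernel pins the guest pages and copies through pio_data in pio_copy_data(), which is what allows the permission checks to be done atomically under the kvm lock.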
Diffstat (limited to 'drivers')
 -rw-r--r--  drivers/kvm/kvm.h       |  21
 -rw-r--r--  drivers/kvm/kvm_main.c  | 183
 -rw-r--r--  drivers/kvm/mmu.c       |   9
 -rw-r--r--  drivers/kvm/svm.c       |  40
 -rw-r--r--  drivers/kvm/vmx.c       |  40
 5 files changed, 236 insertions(+), 57 deletions(-)
diff --git a/drivers/kvm/kvm.h b/drivers/kvm/kvm.h
index 1c4a581938bf..7866b34b6c96 100644
--- a/drivers/kvm/kvm.h
+++ b/drivers/kvm/kvm.h
@@ -74,6 +74,8 @@
 
 #define IOPL_SHIFT 12
 
+#define KVM_PIO_PAGE_OFFSET 1
+
 /*
  * Address types:
  *
@@ -220,6 +222,18 @@ enum {
 	VCPU_SREG_LDTR,
 };
 
+struct kvm_pio_request {
+	unsigned long count;
+	int cur_count;
+	struct page *guest_pages[2];
+	unsigned guest_page_offset;
+	int in;
+	int size;
+	int string;
+	int down;
+	int rep;
+};
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 	union {
@@ -275,7 +289,8 @@ struct kvm_vcpu {
 	int mmio_size;
 	unsigned char mmio_data[8];
 	gpa_t mmio_phys_addr;
-	int pio_pending;
+	struct kvm_pio_request pio;
+	void *pio_data;
 
 	int sigset_active;
 	sigset_t sigset;
@@ -421,6 +436,7 @@ hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
 #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
 static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
 hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva);
 
 void kvm_emulator_want_group7_invlpg(void);
 
@@ -453,6 +469,9 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
 
 struct x86_emulate_ctxt;
 
+int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+		  int size, unsigned long count, int string, int down,
+		  gva_t address, int rep, unsigned port);
 void kvm_emulate_cpuid(struct kvm_vcpu *vcpu);
 int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
 int emulate_clts(struct kvm_vcpu *vcpu);
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index ba7f43a4459e..205998c141fb 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -346,6 +346,17 @@ static void kvm_free_physmem(struct kvm *kvm)
 		kvm_free_physmem_slot(&kvm->memslots[i], NULL);
 }
 
+static void free_pio_guest_pages(struct kvm_vcpu *vcpu)
+{
+	int i;
+
+	for (i = 0; i < 2; ++i)
+		if (vcpu->pio.guest_pages[i]) {
+			__free_page(vcpu->pio.guest_pages[i]);
+			vcpu->pio.guest_pages[i] = NULL;
+		}
+}
+
 static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 {
 	if (!vcpu->vmcs)
@@ -357,6 +368,9 @@ static void kvm_free_vcpu(struct kvm_vcpu *vcpu)
 	kvm_arch_ops->vcpu_free(vcpu);
 	free_page((unsigned long)vcpu->run);
 	vcpu->run = NULL;
+	free_page((unsigned long)vcpu->pio_data);
+	vcpu->pio_data = NULL;
+	free_pio_guest_pages(vcpu);
 }
 
 static void kvm_free_vcpus(struct kvm *kvm)
@@ -1550,44 +1564,168 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
-static void complete_pio(struct kvm_vcpu *vcpu)
+static int pio_copy_data(struct kvm_vcpu *vcpu)
 {
-	struct kvm_io *io = &vcpu->run->io;
+	void *p = vcpu->pio_data;
+	void *q;
+	unsigned bytes;
+	int nr_pages = vcpu->pio.guest_pages[1] ? 2 : 1;
+
+	kvm_arch_ops->vcpu_put(vcpu);
+	q = vmap(vcpu->pio.guest_pages, nr_pages, VM_READ|VM_WRITE,
+		 PAGE_KERNEL);
+	if (!q) {
+		kvm_arch_ops->vcpu_load(vcpu);
+		free_pio_guest_pages(vcpu);
+		return -ENOMEM;
+	}
+	q += vcpu->pio.guest_page_offset;
+	bytes = vcpu->pio.size * vcpu->pio.cur_count;
+	if (vcpu->pio.in)
+		memcpy(q, p, bytes);
+	else
+		memcpy(p, q, bytes);
+	q -= vcpu->pio.guest_page_offset;
+	vunmap(q);
+	kvm_arch_ops->vcpu_load(vcpu);
+	free_pio_guest_pages(vcpu);
+	return 0;
+}
+
+static int complete_pio(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pio_request *io = &vcpu->pio;
 	long delta;
+	int r;
 
 	kvm_arch_ops->cache_regs(vcpu);
 
 	if (!io->string) {
-		if (io->direction == KVM_EXIT_IO_IN)
-			memcpy(&vcpu->regs[VCPU_REGS_RAX], &io->value,
+		if (io->in)
+			memcpy(&vcpu->regs[VCPU_REGS_RAX], vcpu->pio_data,
 			       io->size);
 	} else {
+		if (io->in) {
+			r = pio_copy_data(vcpu);
+			if (r) {
+				kvm_arch_ops->cache_regs(vcpu);
+				return r;
+			}
+		}
+
 		delta = 1;
 		if (io->rep) {
-			delta *= io->count;
+			delta *= io->cur_count;
 			/*
 			 * The size of the register should really depend on
 			 * current address size.
 			 */
 			vcpu->regs[VCPU_REGS_RCX] -= delta;
 		}
-		if (io->string_down)
+		if (io->down)
 			delta = -delta;
 		delta *= io->size;
-		if (io->direction == KVM_EXIT_IO_IN)
+		if (io->in)
 			vcpu->regs[VCPU_REGS_RDI] += delta;
 		else
 			vcpu->regs[VCPU_REGS_RSI] += delta;
 	}
 
-	vcpu->pio_pending = 0;
 	vcpu->run->io_completed = 0;
 
 	kvm_arch_ops->decache_regs(vcpu);
 
-	kvm_arch_ops->skip_emulated_instruction(vcpu);
+	io->count -= io->cur_count;
+	io->cur_count = 0;
+
+	if (!io->count)
+		kvm_arch_ops->skip_emulated_instruction(vcpu);
+	return 0;
 }
 
+int kvm_setup_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
+		  int size, unsigned long count, int string, int down,
+		  gva_t address, int rep, unsigned port)
+{
+	unsigned now, in_page;
+	int i;
+	int nr_pages = 1;
+	struct page *page;
+
+	vcpu->run->exit_reason = KVM_EXIT_IO;
+	vcpu->run->io.direction = in ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
+	vcpu->run->io.size = size;
+	vcpu->run->io.data_offset = KVM_PIO_PAGE_OFFSET * PAGE_SIZE;
+	vcpu->run->io.count = count;
+	vcpu->run->io.port = port;
+	vcpu->pio.count = count;
+	vcpu->pio.cur_count = count;
+	vcpu->pio.size = size;
+	vcpu->pio.in = in;
+	vcpu->pio.string = string;
+	vcpu->pio.down = down;
+	vcpu->pio.guest_page_offset = offset_in_page(address);
+	vcpu->pio.rep = rep;
+
+	if (!string) {
+		kvm_arch_ops->cache_regs(vcpu);
+		memcpy(vcpu->pio_data, &vcpu->regs[VCPU_REGS_RAX], 4);
+		kvm_arch_ops->decache_regs(vcpu);
+		return 0;
+	}
+
+	if (!count) {
+		kvm_arch_ops->skip_emulated_instruction(vcpu);
+		return 1;
+	}
+
+	now = min(count, PAGE_SIZE / size);
+
+	if (!down)
+		in_page = PAGE_SIZE - offset_in_page(address);
+	else
+		in_page = offset_in_page(address) + size;
+	now = min(count, (unsigned long)in_page / size);
+	if (!now) {
+		/*
+		 * String I/O straddles page boundary. Pin two guest pages
+		 * so that we satisfy atomicity constraints. Do just one
+		 * transaction to avoid complexity.
+		 */
+		nr_pages = 2;
+		now = 1;
+	}
+	if (down) {
+		/*
+		 * String I/O in reverse. Yuck. Kill the guest, fix later.
+		 */
+		printk(KERN_ERR "kvm: guest string pio down\n");
+		inject_gp(vcpu);
+		return 1;
+	}
+	vcpu->run->io.count = now;
+	vcpu->pio.cur_count = now;
+
+	for (i = 0; i < nr_pages; ++i) {
+		spin_lock(&vcpu->kvm->lock);
+		page = gva_to_page(vcpu, address + i * PAGE_SIZE);
+		if (page)
+			get_page(page);
+		vcpu->pio.guest_pages[i] = page;
+		spin_unlock(&vcpu->kvm->lock);
+		if (!page) {
+			inject_gp(vcpu);
+			free_pio_guest_pages(vcpu);
+			return 1;
+		}
+	}
+
+	if (!vcpu->pio.in)
+		return pio_copy_data(vcpu);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(kvm_setup_pio);
+
 static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	int r;
@@ -1602,9 +1740,11 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vcpu->cr8 = kvm_run->cr8;
 
 	if (kvm_run->io_completed) {
-		if (vcpu->pio_pending)
-			complete_pio(vcpu);
-		else {
+		if (vcpu->pio.cur_count) {
+			r = complete_pio(vcpu);
+			if (r)
+				goto out;
+		} else {
 			memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
 			vcpu->mmio_read_completed = 1;
 		}
@@ -1620,6 +1760,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	r = kvm_arch_ops->run(vcpu, kvm_run);
 
+out:
 	if (vcpu->sigset_active)
 		sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
@@ -1995,9 +2136,12 @@ static struct page *kvm_vcpu_nopage(struct vm_area_struct *vma,
 
 	*type = VM_FAULT_MINOR;
 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-	if (pgoff != 0)
+	if (pgoff == 0)
+		page = virt_to_page(vcpu->run);
+	else if (pgoff == KVM_PIO_PAGE_OFFSET)
+		page = virt_to_page(vcpu->pio_data);
+	else
 		return NOPAGE_SIGBUS;
-	page = virt_to_page(vcpu->run);
 	get_page(page);
 	return page;
 }
@@ -2094,6 +2238,12 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 		goto out_unlock;
 	vcpu->run = page_address(page);
 
+	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+	r = -ENOMEM;
+	if (!page)
+		goto out_free_run;
+	vcpu->pio_data = page_address(page);
+
 	vcpu->host_fx_image = (char*)ALIGN((hva_t)vcpu->fx_buf,
 					   FX_IMAGE_ALIGN);
 	vcpu->guest_fx_image = vcpu->host_fx_image + FX_IMAGE_SIZE;
@@ -2123,6 +2273,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
 out_free_vcpus:
 	kvm_free_vcpu(vcpu);
+out_free_run:
+	free_page((unsigned long)vcpu->run);
+	vcpu->run = NULL;
 out_unlock:
 	mutex_unlock(&vcpu->mutex);
 out:
@@ -2491,7 +2644,7 @@ static long kvm_dev_ioctl(struct file *filp,
 		r = -EINVAL;
 		if (arg)
 			goto out;
-		r = PAGE_SIZE;
+		r = 2 * PAGE_SIZE;
 		break;
 	default:
 		;
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 2d905770fd88..4843e95e54e1 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -735,6 +735,15 @@ hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
 	return gpa_to_hpa(vcpu, gpa);
 }
 
+struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
+{
+	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+
+	if (gpa == UNMAPPED_GVA)
+		return NULL;
+	return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
+}
+
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 {
 }
diff --git a/drivers/kvm/svm.c b/drivers/kvm/svm.c
index 2396ada23777..64afc5cf890d 100644
--- a/drivers/kvm/svm.c
+++ b/drivers/kvm/svm.c
@@ -984,7 +984,7 @@ static int io_get_override(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address)
+static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, gva_t *address)
 {
 	unsigned long addr_mask;
 	unsigned long *reg;
@@ -1028,40 +1028,38 @@ static unsigned long io_adress(struct kvm_vcpu *vcpu, int ins, u64 *address)
 static int io_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u32 io_info = vcpu->svm->vmcb->control.exit_info_1; //address size bug?
-	int _in = io_info & SVM_IOIO_TYPE_MASK;
+	int size, down, in, string, rep;
+	unsigned port;
+	unsigned long count;
+	gva_t address = 0;
 
 	++kvm_stat.io_exits;
 
 	vcpu->svm->next_rip = vcpu->svm->vmcb->control.exit_info_2;
 
-	kvm_run->exit_reason = KVM_EXIT_IO;
-	kvm_run->io.port = io_info >> 16;
-	kvm_run->io.direction = (_in) ? KVM_EXIT_IO_IN : KVM_EXIT_IO_OUT;
-	kvm_run->io.size = ((io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT);
-	kvm_run->io.string = (io_info & SVM_IOIO_STR_MASK) != 0;
-	kvm_run->io.rep = (io_info & SVM_IOIO_REP_MASK) != 0;
-	kvm_run->io.count = 1;
+	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
+	port = io_info >> 16;
+	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
+	string = (io_info & SVM_IOIO_STR_MASK) != 0;
+	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
+	count = 1;
+	down = (vcpu->svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
-	if (kvm_run->io.string) {
+	if (string) {
 		unsigned addr_mask;
 
-		addr_mask = io_adress(vcpu, _in, &kvm_run->io.address);
+		addr_mask = io_adress(vcpu, in, &address);
 		if (!addr_mask) {
 			printk(KERN_DEBUG "%s: get io address failed\n",
 			       __FUNCTION__);
 			return 1;
 		}
 
-		if (kvm_run->io.rep) {
-			kvm_run->io.count
-				= vcpu->regs[VCPU_REGS_RCX] & addr_mask;
-			kvm_run->io.string_down = (vcpu->svm->vmcb->save.rflags
-						   & X86_EFLAGS_DF) != 0;
-		}
-	} else
-		kvm_run->io.value = vcpu->svm->vmcb->save.rax;
-	vcpu->pio_pending = 1;
-	return 0;
+		if (rep)
+			count = vcpu->regs[VCPU_REGS_RCX] & addr_mask;
+	}
+	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
+			     address, rep, port);
 }
 
 static int nop_on_interception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index e69bab6d811d..0d9bf0b36d37 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1394,7 +1394,7 @@ static int handle_triple_fault(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 }
 
-static int get_io_count(struct kvm_vcpu *vcpu, u64 *count)
+static int get_io_count(struct kvm_vcpu *vcpu, unsigned long *count)
 {
 	u64 inst;
 	gva_t rip;
@@ -1439,35 +1439,35 @@ static int get_io_count(struct kvm_vcpu *vcpu, u64 *count)
 done:
 	countr_size *= 8;
 	*count = vcpu->regs[VCPU_REGS_RCX] & (~0ULL >> (64 - countr_size));
+	//printk("cx: %lx\n", vcpu->regs[VCPU_REGS_RCX]);
 	return 1;
 }
 
 static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	u64 exit_qualification;
+	int size, down, in, string, rep;
+	unsigned port;
+	unsigned long count;
+	gva_t address;
 
 	++kvm_stat.io_exits;
 	exit_qualification = vmcs_read64(EXIT_QUALIFICATION);
-	kvm_run->exit_reason = KVM_EXIT_IO;
-	if (exit_qualification & 8)
-		kvm_run->io.direction = KVM_EXIT_IO_IN;
-	else
-		kvm_run->io.direction = KVM_EXIT_IO_OUT;
-	kvm_run->io.size = (exit_qualification & 7) + 1;
-	kvm_run->io.string = (exit_qualification & 16) != 0;
-	kvm_run->io.string_down
-		= (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
-	kvm_run->io.rep = (exit_qualification & 32) != 0;
-	kvm_run->io.port = exit_qualification >> 16;
-	kvm_run->io.count = 1;
-	if (kvm_run->io.string) {
-		if (!get_io_count(vcpu, &kvm_run->io.count))
+	in = (exit_qualification & 8) != 0;
+	size = (exit_qualification & 7) + 1;
+	string = (exit_qualification & 16) != 0;
+	down = (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_DF) != 0;
+	count = 1;
+	rep = (exit_qualification & 32) != 0;
+	port = exit_qualification >> 16;
+	address = 0;
+	if (string) {
+		if (rep && !get_io_count(vcpu, &count))
 			return 1;
-		kvm_run->io.address = vmcs_readl(GUEST_LINEAR_ADDRESS);
-	} else
-		kvm_run->io.value = vcpu->regs[VCPU_REGS_RAX]; /* rax */
-	vcpu->pio_pending = 1;
-	return 0;
+		address = vmcs_readl(GUEST_LINEAR_ADDRESS);
+	}
+	return kvm_setup_pio(vcpu, kvm_run, in, size, count, string, down,
+			     address, rep, port);
 }
 
 static void