path: root/arch
author     Gleb Natapov <gleb@redhat.com>           2011-03-07 07:55:06 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>    2011-03-17 12:08:33 -0400
commit     5601d05b8c340ee2643febc146099325eff187eb (patch)
tree       61fb3298bb267eecfd899621cc852114d636de52 /arch
parent     831ca6093ca486060721f5c3c74f97b10f3172b9 (diff)
KVM: emulator: Fix io permission checking for 64bit guest
The current implementation truncates the upper 32 bits of the TR base address
during the IO permission bitmap check. This patch fixes that.

Reported-and-tested-by: Francis Moreau <francis.moro@gmail.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
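For context, here is a minimal standalone sketch (not part of the patch) of why the upper bits matter: in 64-bit mode the TSS descriptor is 16 bytes wide and bits 63:32 of its base live in a separate dword, the "base3" value the patch threads through the emulator callbacks, so the full TR base must be assembled from both halves. The helper name below is illustrative only.

/*
 * Illustrative sketch only -- not kernel code.  In IA-32e (64-bit) mode,
 * system-segment descriptors such as the TSS descriptor occupy 16 bytes;
 * bits 63:32 of the base address sit in the second 8-byte half.
 */
#include <stdint.h>

/* Hypothetical helper: compose the full 64-bit TR base. */
static inline uint64_t tr_base(uint32_t base_low32, uint32_t base3)
{
	/* Using base_low32 alone truncates the base -- the bug fixed here. */
	return (uint64_t)base_low32 | ((uint64_t)base3 << 32);
}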
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/include/asm/kvm_emulate.h |  4
-rw-r--r--   arch/x86/kvm/emulate.c             | 37
-rw-r--r--   arch/x86/kvm/x86.c                 | 15
3 files changed, 35 insertions(+), 21 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 50ebc327a368..0f5213564326 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -142,9 +142,9 @@ struct x86_emulate_ops {
 	int (*pio_out_emulated)(int size, unsigned short port, const void *val,
 				unsigned int count, struct kvm_vcpu *vcpu);
 
-	bool (*get_cached_descriptor)(struct desc_struct *desc,
+	bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
 				      int seg, struct kvm_vcpu *vcpu);
-	void (*set_cached_descriptor)(struct desc_struct *desc,
+	void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
 				      int seg, struct kvm_vcpu *vcpu);
 	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
 	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a90d7e033304..d6088b8686fb 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -878,7 +878,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
+						ctxt->vcpu))
 			return;
 
 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
@@ -930,6 +931,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	return ret;
 }
 
+/* Does not support long mode */
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   struct x86_emulate_ops *ops,
 				   u16 selector, int seg)
@@ -1041,7 +1043,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	}
 load:
 	ops->set_segment_selector(selector, seg, ctxt->vcpu);
-	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1561,7 +1563,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			 struct desc_struct *ss)
 {
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
 	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0;		/* will be adjusted later */
@@ -1608,9 +1610,9 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.d = 0;
 		cs.l = 1;
 	}
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->regs[VCPU_REGS_RCX] = c->eip;
@@ -1680,9 +1682,9 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.l = 1;
 	}
 
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
@@ -1737,9 +1739,9 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel |= SELECTOR_RPL_MASK;
 	ss_sel |= SELECTOR_RPL_MASK;
 
-	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
 	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
 	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->eip = c->regs[VCPU_REGS_RDX];
@@ -1765,24 +1767,29 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 				 u16 port, u16 len)
 {
 	struct desc_struct tr_seg;
+	u32 base3;
 	int r;
 	u16 io_bitmap_ptr;
 	u8 perm, bit_idx = port & 0x7;
 	unsigned mask = (1 << len) - 1;
+	unsigned long base;
 
-	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
+	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
 	if (!tr_seg.p)
 		return false;
 	if (desc_limit_scaled(&tr_seg) < 103)
 		return false;
-	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
-			  ctxt->vcpu, NULL);
+	base = get_desc_base(&tr_seg);
+#ifdef CONFIG_X86_64
+	base |= ((u64)base3) << 32;
+#endif
+	r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
 		return false;
-	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
-			  &perm, 1, ctxt->vcpu, NULL);
+	r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 1, ctxt->vcpu,
+			  NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if ((perm >> bit_idx) & mask)
@@ -2127,7 +2134,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	}
 
 	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
-	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
 	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
 
 	if (has_error_code) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b9c2b8e6c70c..01f08a65d09b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4162,8 +4162,8 @@ static unsigned long emulator_get_cached_segment_base(int seg,
 	return get_segment_base(vcpu, seg);
 }
 
-static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
-					   struct kvm_vcpu *vcpu)
+static bool emulator_get_cached_descriptor(struct desc_struct *desc, u32 *base3,
+					   int seg, struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment var;
 
@@ -4176,6 +4176,10 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
 		var.limit >>= 12;
 	set_desc_limit(desc, var.limit);
 	set_desc_base(desc, (unsigned long)var.base);
+#ifdef CONFIG_X86_64
+	if (base3)
+		*base3 = var.base >> 32;
+#endif
 	desc->type = var.type;
 	desc->s = var.s;
 	desc->dpl = var.dpl;
@@ -4188,8 +4192,8 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
 	return true;
 }
 
-static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
-					   struct kvm_vcpu *vcpu)
+static void emulator_set_cached_descriptor(struct desc_struct *desc, u32 base3,
+					   int seg, struct kvm_vcpu *vcpu)
 {
 	struct kvm_segment var;
 
@@ -4197,6 +4201,9 @@ static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
 	kvm_get_segment(vcpu, &var, seg);
 
 	var.base = get_desc_base(desc);
+#ifdef CONFIG_X86_64
+	var.base |= ((u64)base3) << 32;
+#endif
 	var.limit = get_desc_limit(desc);
 	if (desc->g)
 		var.limit = (var.limit << 12) | 0xfff;