-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |  14
-rw-r--r--  arch/x86/include/asm/kvm_host.h    |   7
-rw-r--r--  arch/x86/kvm/emulate.c             |   6
-rw-r--r--  arch/x86/kvm/mmu.c                 |  17
-rw-r--r--  arch/x86/kvm/mmu.h                 |   6
-rw-r--r--  arch/x86/kvm/paging_tmpl.h         |  11
-rw-r--r--  arch/x86/kvm/x86.c                 | 131
7 files changed, 142 insertions(+), 50 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 784d7c586d8e..7a6f54fa13ba 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -54,13 +54,23 @@ struct x86_emulate_ctxt;
 struct x86_emulate_ops {
 	/*
 	 * read_std: Read bytes of standard (non-emulated/special) memory.
-	 *           Used for instruction fetch, stack operations, and others.
+	 *           Used for descriptor reading.
 	 * @addr:  [IN ] Linear address from which to read.
 	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
 	 * @bytes: [IN ] Number of bytes to read from memory.
 	 */
 	int (*read_std)(unsigned long addr, void *val,
-			unsigned int bytes, struct kvm_vcpu *vcpu);
+			unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
+
+	/*
+	 * fetch: Read bytes of standard (non-emulated/special) memory.
+	 *        Used for instruction fetch.
+	 * @addr:  [IN ] Linear address from which to read.
+	 * @val:   [OUT] Value read from memory, zero-extended to 'u_long'.
+	 * @bytes: [IN ] Number of bytes to read from memory.
+	 */
+	int (*fetch)(unsigned long addr, void *val,
+		     unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
 
 	/*
 	 * read_emulated: Read bytes from emulated/special memory area.
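
The split above is the heart of the patch: read_std keeps its name but is now
reserved for descriptor reads (serviced without a privilege check), while
instruction bytes go through the new fetch callback so they can be validated
against the guest's CPL. A minimal userspace model of that division of labor
(all names and the toy fault rule below are illustrative, not kernel code):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct toy_vcpu { int cpl; };        /* stand-in for struct kvm_vcpu */

    struct toy_ops {                     /* models x86_emulate_ops */
            int (*read_std)(unsigned long addr, void *val,
                            unsigned int bytes, struct toy_vcpu *vcpu,
                            uint32_t *error);
            int (*fetch)(unsigned long addr, void *val,
                         unsigned int bytes, struct toy_vcpu *vcpu,
                         uint32_t *error);
    };

    static uint8_t guest_mem[4096];

    /* descriptor read: no privilege check, mirrors the "system" path */
    static int toy_read_std(unsigned long addr, void *val,
                            unsigned int bytes, struct toy_vcpu *vcpu,
                            uint32_t *error)
    {
            (void)vcpu;
            memcpy(val, &guest_mem[addr], bytes);
            if (error)
                    *error = 0;
            return 0;
    }

    /* instruction fetch: honors CPL; the 2048 boundary is a made-up
     * "supervisor-only" region for demonstration */
    static int toy_fetch(unsigned long addr, void *val, unsigned int bytes,
                         struct toy_vcpu *vcpu, uint32_t *error)
    {
            if (vcpu->cpl == 3 && addr >= 2048) {
                    if (error)
                            *error = 1;
                    return -1;
            }
            memcpy(val, &guest_mem[addr], bytes);
            return 0;
    }

    int main(void)
    {
            struct toy_ops ops = { .read_std = toy_read_std,
                                   .fetch = toy_fetch };
            struct toy_vcpu vcpu = { .cpl = 3 };
            uint8_t insn[4];
            uint32_t err = 0;

            if (ops.fetch(3000, insn, sizeof(insn), &vcpu, &err))
                    printf("fetch faulted, error=%u\n", err);
            return 0;
    }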
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 152233723844..c07c16f64015 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -243,7 +243,8 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
 	void (*free)(struct kvm_vcpu *vcpu);
-	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
+	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
+			    u32 *error);
 	void (*prefetch_page)(struct kvm_vcpu *vcpu,
 			      struct kvm_mmu_page *page);
 	int (*sync_page)(struct kvm_vcpu *vcpu,
@@ -660,6 +661,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
 void kvm_mmu_unload(struct kvm_vcpu *vcpu);
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error);
 
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e4e2df3b6038..c44b46014842 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -616,7 +616,7 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 
 	if (linear < fc->start || linear >= fc->end) {
 		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
-		rc = ops->read_std(linear, fc->data, size, ctxt->vcpu);
+		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
 		if (rc)
 			return rc;
 		fc->start = linear;
@@ -671,11 +671,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 		op_bytes = 3;
 	*address = 0;
 	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
-			   ctxt->vcpu);
+			   ctxt->vcpu, NULL);
 	if (rc)
 		return rc;
 	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
-			   ctxt->vcpu);
+			   ctxt->vcpu, NULL);
 	return rc;
 }
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 739793240d1d..741373e8ca77 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -138,12 +138,6 @@ module_param(oos_shadow, bool, 0644);
 #define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
 			| PT64_NX_MASK)
 
-#define PFERR_PRESENT_MASK (1U << 0)
-#define PFERR_WRITE_MASK (1U << 1)
-#define PFERR_USER_MASK (1U << 2)
-#define PFERR_RSVD_MASK (1U << 3)
-#define PFERR_FETCH_MASK (1U << 4)
-
 #define RMAP_EXT 4
 
 #define ACC_EXEC_MASK 1
@@ -1632,7 +1626,7 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
 	struct page *page;
 
-	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
 	if (gpa == UNMAPPED_GVA)
 		return NULL;
@@ -2155,8 +2149,11 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
+				  u32 access, u32 *error)
 {
+	if (error)
+		*error = 0;
 	return vaddr;
 }
 
@@ -2740,7 +2737,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	if (tdp_enabled)
 		return 0;
 
-	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -3237,7 +3234,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
 		if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
 			audit_mappings_page(vcpu, ent, va, level - 1);
 		else {
-			gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
+			gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
 			gfn_t gfn = gpa >> PAGE_SHIFT;
 			pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
 			hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 61ef5a65b7d8..be66759321a5 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -42,6 +42,12 @@
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
+#define PFERR_PRESENT_MASK (1U << 0)
+#define PFERR_WRITE_MASK (1U << 1)
+#define PFERR_USER_MASK (1U << 2)
+#define PFERR_RSVD_MASK (1U << 3)
+#define PFERR_FETCH_MASK (1U << 4)
+
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
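
Moving the PFERR_* bits from mmu.c into mmu.h lets x86.c compose the same
error-code bits that the hardware would set on a #PF. How the bits read, with
a small illustrative decoder (decode_pferr is not a kernel function; the bit
values match the hunk above):

    #include <stdint.h>
    #include <stdio.h>

    #define PFERR_PRESENT_MASK (1U << 0)   /* fault on a present page */
    #define PFERR_WRITE_MASK   (1U << 1)   /* access was a write */
    #define PFERR_USER_MASK    (1U << 2)   /* access came from CPL 3 */
    #define PFERR_RSVD_MASK    (1U << 3)   /* reserved PTE bit was set */
    #define PFERR_FETCH_MASK   (1U << 4)   /* access was an instruction fetch */

    static void decode_pferr(uint32_t err)
    {
            printf("%s %s, page %s%s\n",
                   (err & PFERR_USER_MASK)  ? "user" : "supervisor",
                   (err & PFERR_FETCH_MASK) ? "fetch" :
                   (err & PFERR_WRITE_MASK) ? "write" : "read",
                   (err & PFERR_PRESENT_MASK) ? "present (protection)"
                                              : "not present",
                   (err & PFERR_RSVD_MASK) ? ", reserved bit set" : "");
    }

    int main(void)
    {
            /* e.g. a user-mode write that hit a protection fault */
            decode_pferr(PFERR_USER_MASK | PFERR_WRITE_MASK |
                         PFERR_PRESENT_MASK);
            return 0;
    }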
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index df15a5307d2d..81eab9a50e6a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -490,18 +490,23 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
-static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
+static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
+			       u32 *error)
 {
 	struct guest_walker walker;
 	gpa_t gpa = UNMAPPED_GVA;
 	int r;
 
-	r = FNAME(walk_addr)(&walker, vcpu, vaddr, 0, 0, 0);
+	r = FNAME(walk_addr)(&walker, vcpu, vaddr,
+			     !!(access & PFERR_WRITE_MASK),
+			     !!(access & PFERR_USER_MASK),
+			     !!(access & PFERR_FETCH_MASK));
 
 	if (r) {
 		gpa = gfn_to_gpa(walker.gfn);
 		gpa |= vaddr & ~PAGE_MASK;
-	}
+	} else if (error)
+		*error = walker.error_code;
 
 	return gpa;
 }
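
The guest page-table walker now receives the write/user/fetch intent directly
from the access mask, and on a failed walk it hands the architectural error
code back through *error. The caller-side contract in isolation (translate()
is a stub standing in for the mmu callback; all names here are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define UNMAPPED_GVA (~(uint64_t)0)

    /* stub: always fails, reporting "user read, not present" (0x4) */
    static uint64_t translate(uint64_t gva, uint32_t access, uint32_t *error)
    {
            (void)gva;
            (void)access;
            if (error)
                    *error = 0x4;
            return UNMAPPED_GVA;
    }

    int main(void)
    {
            uint32_t error_code;
            uint64_t gpa = translate(0x1000, 0, &error_code);

            if (gpa == UNMAPPED_GVA) {
                    /* the real callers inject a page fault here:
                     * kvm_inject_page_fault(vcpu, gva, error_code); */
                    printf("unmapped, error_code=%#x\n",
                           (unsigned)error_code);
            }
            return 0;
    }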
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a28379507d30..ea3a8af8a478 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3039,14 +3039,41 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
 	return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
 }
 
-static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
-			       struct kvm_vcpu *vcpu)
+gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	access |= PFERR_FETCH_MASK;
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	access |= PFERR_WRITE_MASK;
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, access, error);
+}
+
+/* used to access any guest's mapped memory without checking CPL */
+gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
+{
+	return vcpu->arch.mmu.gva_to_gpa(vcpu, gva, 0, error);
+}
+
+static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes,
+				      struct kvm_vcpu *vcpu, u32 access,
+				      u32 *error)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned toread = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
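
All four wrappers reduce to one pattern: CPL 3 contributes the user bit, the
operation contributes its own bit, and the system variant passes 0 to bypass
the CPL check entirely. Condensed into a sketch (toy_vcpu replaces the
kvm_x86_ops->get_cpl() indirection with a plain field; illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    #define PFERR_WRITE_MASK (1U << 1)
    #define PFERR_USER_MASK  (1U << 2)
    #define PFERR_FETCH_MASK (1U << 4)

    struct toy_vcpu { int cpl; };

    /* read: op_bits = 0; fetch: PFERR_FETCH_MASK; write: PFERR_WRITE_MASK;
     * the system variant skips this helper and passes access = 0 */
    static uint32_t access_for(const struct toy_vcpu *vcpu, uint32_t op_bits)
    {
            uint32_t access = (vcpu->cpl == 3) ? PFERR_USER_MASK : 0;
            return access | op_bits;
    }

    int main(void)
    {
            struct toy_vcpu user = { .cpl = 3 }, kern = { .cpl = 0 };

            printf("user write:   %#x\n",
                   (unsigned)access_for(&user, PFERR_WRITE_MASK));
            printf("kernel fetch: %#x\n",
                   (unsigned)access_for(&kern, PFERR_FETCH_MASK));
            return 0;
    }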
@@ -3069,14 +3096,37 @@ out:
 	return r;
 }
 
+/* used for instruction fetching */
+static int kvm_fetch_guest_virt(gva_t addr, void *val, unsigned int bytes,
+				struct kvm_vcpu *vcpu, u32 *error)
+{
+	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu,
+					  access | PFERR_FETCH_MASK, error);
+}
+
+static int kvm_read_guest_virt(gva_t addr, void *val, unsigned int bytes,
+			       struct kvm_vcpu *vcpu, u32 *error)
+{
+	u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, access,
+					  error);
+}
+
+static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
+				      struct kvm_vcpu *vcpu, u32 *error)
+{
+	return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
+}
+
 static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-				struct kvm_vcpu *vcpu)
+				struct kvm_vcpu *vcpu, u32 *error)
 {
 	void *data = val;
 	int r = X86EMUL_CONTINUE;
 
 	while (bytes) {
-		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+		gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
 		unsigned offset = addr & (PAGE_SIZE-1);
 		unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
 		int ret;
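
Both the read helper and the write path translate one page at a time: a range
that is contiguous in guest-virtual space may map to scattered guest-physical
frames, so each iteration clamps the chunk to the end of the current page. The
chunking logic on its own (process_chunk stands in for the per-page
translate-and-copy; names are illustrative):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static void for_each_page_chunk(unsigned long addr, unsigned int bytes,
                                    void (*process_chunk)(unsigned long addr,
                                                          unsigned int len))
    {
            while (bytes) {
                    unsigned int offset = addr & (PAGE_SIZE - 1);
                    unsigned int len = bytes < PAGE_SIZE - offset
                                             ? bytes : PAGE_SIZE - offset;

                    process_chunk(addr, len);
                    bytes -= len;
                    addr  += len;
            }
    }

    static void show(unsigned long addr, unsigned int len)
    {
            printf("chunk at %#lx, %u bytes\n", addr, len);
    }

    int main(void)
    {
            for_each_page_chunk(0xff0, 0x20, show); /* crosses a boundary */
            return 0;
    }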
@@ -3106,6 +3156,7 @@ static int emulator_read_emulated(unsigned long addr,
 				  struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
+	u32 error_code;
 
 	if (vcpu->mmio_read_completed) {
 		memcpy(val, vcpu->mmio_data, bytes);
@@ -3115,17 +3166,20 @@ static int emulator_read_emulated(unsigned long addr,
 		return X86EMUL_CONTINUE;
 	}
 
-	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+	gpa = kvm_mmu_gva_to_gpa_read(vcpu, addr, &error_code);
+
+	if (gpa == UNMAPPED_GVA) {
+		kvm_inject_page_fault(vcpu, addr, error_code);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
 
 	/* For APIC access vmexit */
 	if ((gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
 		goto mmio;
 
-	if (kvm_read_guest_virt(addr, val, bytes, vcpu)
+	if (kvm_read_guest_virt(addr, val, bytes, vcpu, NULL)
 			== X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
-	if (gpa == UNMAPPED_GVA)
-		return X86EMUL_PROPAGATE_FAULT;
 
 mmio:
 	/*
@@ -3164,11 +3218,12 @@ static int emulator_write_emulated_onepage(unsigned long addr,
 					   struct kvm_vcpu *vcpu)
 {
 	gpa_t gpa;
+	u32 error_code;
 
-	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+	gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, &error_code);
 
 	if (gpa == UNMAPPED_GVA) {
-		kvm_inject_page_fault(vcpu, addr, 2);
+		kvm_inject_page_fault(vcpu, addr, error_code);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -3232,7 +3287,7 @@ static int emulator_cmpxchg_emulated(unsigned long addr,
 		char *kaddr;
 		u64 val;
 
-		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr);
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, NULL);
 
 		if (gpa == UNMAPPED_GVA ||
 		    (gpa & PAGE_MASK) == APIC_DEFAULT_PHYS_BASE)
@@ -3297,7 +3352,7 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 
 	rip_linear = rip + get_segment_base(vcpu, VCPU_SREG_CS);
 
-	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu);
+	kvm_read_guest_virt(rip_linear, (void *)opcodes, 4, vcpu, NULL);
 
 	printk(KERN_ERR "emulation failed (%s) rip %lx %02x %02x %02x %02x\n",
 	       context, rip, opcodes[0], opcodes[1], opcodes[2], opcodes[3]);
@@ -3305,7 +3360,8 @@ void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context)
 EXPORT_SYMBOL_GPL(kvm_report_emulation_failure);
 
 static struct x86_emulate_ops emulate_ops = {
-	.read_std            = kvm_read_guest_virt,
+	.read_std            = kvm_read_guest_virt_system,
+	.fetch               = kvm_fetch_guest_virt,
 	.read_emulated       = emulator_read_emulated,
 	.write_emulated      = emulator_write_emulated,
 	.cmpxchg_emulated    = emulator_cmpxchg_emulated,
@@ -3442,12 +3498,17 @@ static int pio_copy_data(struct kvm_vcpu *vcpu)
 	gva_t q = vcpu->arch.pio.guest_gva;
 	unsigned bytes;
 	int ret;
+	u32 error_code;
 
 	bytes = vcpu->arch.pio.size * vcpu->arch.pio.cur_count;
 	if (vcpu->arch.pio.in)
-		ret = kvm_write_guest_virt(q, p, bytes, vcpu);
+		ret = kvm_write_guest_virt(q, p, bytes, vcpu, &error_code);
 	else
-		ret = kvm_read_guest_virt(q, p, bytes, vcpu);
+		ret = kvm_read_guest_virt(q, p, bytes, vcpu, &error_code);
+
+	if (ret == X86EMUL_PROPAGATE_FAULT)
+		kvm_inject_page_fault(vcpu, q, error_code);
+
 	return ret;
 }
 
3453 3514
@@ -3468,7 +3529,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
 	if (io->in) {
 		r = pio_copy_data(vcpu);
 		if (r)
-			return r;
+			goto out;
 	}
 
 	delta = 1;
@@ -3495,7 +3556,7 @@ int complete_pio(struct kvm_vcpu *vcpu)
 			kvm_register_write(vcpu, VCPU_REGS_RSI, val);
 		}
 	}
-
+out:
 	io->count -= io->cur_count;
 	io->cur_count = 0;
 
@@ -3617,10 +3678,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, int in,
 	if (!vcpu->arch.pio.in) {
 		/* string PIO write */
 		ret = pio_copy_data(vcpu);
-		if (ret == X86EMUL_PROPAGATE_FAULT) {
-			kvm_inject_gp(vcpu, 0);
+		if (ret == X86EMUL_PROPAGATE_FAULT)
 			return 1;
-		}
 		if (ret == 0 && !pio_string_write(vcpu)) {
 			complete_pio(vcpu);
 			if (vcpu->arch.pio.count == 0)
@@ -4663,7 +4722,9 @@ static int load_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 		kvm_queue_exception_e(vcpu, GP_VECTOR, selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
-	return kvm_read_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+	return kvm_read_guest_virt_system(dtable.base + index*8,
+					  seg_desc, sizeof(*seg_desc),
+					  vcpu, NULL);
 }
 
 /* allowed just for 8 bytes segments */
@@ -4677,15 +4738,23 @@ static int save_guest_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 
 	if (dtable.limit < index * 8 + 7)
 		return 1;
-	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu);
+	return kvm_write_guest_virt(dtable.base + index*8, seg_desc, sizeof(*seg_desc), vcpu, NULL);
+}
+
+static gpa_t get_tss_base_addr_write(struct kvm_vcpu *vcpu,
+				     struct desc_struct *seg_desc)
+{
+	u32 base_addr = get_desc_base(seg_desc);
+
+	return kvm_mmu_gva_to_gpa_write(vcpu, base_addr, NULL);
 }
 
-static gpa_t get_tss_base_addr(struct kvm_vcpu *vcpu,
-			       struct desc_struct *seg_desc)
+static gpa_t get_tss_base_addr_read(struct kvm_vcpu *vcpu,
+				    struct desc_struct *seg_desc)
 {
 	u32 base_addr = get_desc_base(seg_desc);
 
-	return vcpu->arch.mmu.gva_to_gpa(vcpu, base_addr);
+	return kvm_mmu_gva_to_gpa_read(vcpu, base_addr, NULL);
 }
 
 static u16 get_segment_selector(struct kvm_vcpu *vcpu, int seg)
@@ -4894,7 +4963,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
 			   sizeof tss_segment_16))
 		goto out;
 
-	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
 			   &tss_segment_16, sizeof tss_segment_16))
 		goto out;
 
@@ -4902,7 +4971,7 @@ static int kvm_task_switch_16(struct kvm_vcpu *vcpu, u16 tss_selector,
 		tss_segment_16.prev_task_link = old_tss_sel;
 
 		if (kvm_write_guest(vcpu->kvm,
-				    get_tss_base_addr(vcpu, nseg_desc),
+				    get_tss_base_addr_write(vcpu, nseg_desc),
 				    &tss_segment_16.prev_task_link,
 				    sizeof tss_segment_16.prev_task_link))
 			goto out;
@@ -4933,7 +5002,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
 			   sizeof tss_segment_32))
 		goto out;
 
-	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr(vcpu, nseg_desc),
+	if (kvm_read_guest(vcpu->kvm, get_tss_base_addr_read(vcpu, nseg_desc),
 			   &tss_segment_32, sizeof tss_segment_32))
 		goto out;
 
@@ -4941,7 +5010,7 @@ static int kvm_task_switch_32(struct kvm_vcpu *vcpu, u16 tss_selector,
 		tss_segment_32.prev_task_link = old_tss_sel;
 
 		if (kvm_write_guest(vcpu->kvm,
-				    get_tss_base_addr(vcpu, nseg_desc),
+				    get_tss_base_addr_write(vcpu, nseg_desc),
 				    &tss_segment_32.prev_task_link,
 				    sizeof tss_segment_32.prev_task_link))
 			goto out;
@@ -4964,7 +5033,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
 	u32 old_tss_base = get_segment_base(vcpu, VCPU_SREG_TR);
 	u16 old_tss_sel = get_segment_selector(vcpu, VCPU_SREG_TR);
 
-	old_tss_base = vcpu->arch.mmu.gva_to_gpa(vcpu, old_tss_base);
+	old_tss_base = kvm_mmu_gva_to_gpa_write(vcpu, old_tss_base, NULL);
 
 	/* FIXME: Handle errors. Failure to read either TSS or their
 	 * descriptors should generate a pagefault.
@@ -5199,7 +5268,7 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
 	vcpu_load(vcpu);
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
-	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vaddr);
+	gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	tr->physical_address = gpa;
 	tr->valid = gpa != UNMAPPED_GVA;
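
kvm_arch_vcpu_ioctl_translate backs the KVM_TRANSLATE vcpu ioctl, so the
switch to kvm_mmu_gva_to_gpa_system means the ioctl performs an unchecked
(CPL-independent) translation. A userspace caller exercises this path roughly
as follows (sketch: assumes vcpu_fd was obtained via KVM_CREATE_VCPU; error
handling trimmed):

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void translate_gva(int vcpu_fd, unsigned long long gva)
    {
            struct kvm_translation tr = { .linear_address = gva };

            if (ioctl(vcpu_fd, KVM_TRANSLATE, &tr) < 0) {
                    perror("KVM_TRANSLATE");
                    return;
            }

            if (tr.valid)
                    printf("gva %#llx -> gpa %#llx\n",
                           gva, (unsigned long long)tr.physical_address);
            else
                    printf("gva %#llx is unmapped\n", gva);
    }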