 arch/x86/include/asm/kvm_emulate.h |  1 -
 arch/x86/include/asm/kvm_host.h    | 17 ++++++++++++-----
 arch/x86/kvm/emulate.c             | 30 ++++++++++++----------------
 arch/x86/kvm/mmu.c                 |  6 ++----
 arch/x86/kvm/paging_tmpl.h         |  6 +++++-
 arch/x86/kvm/x86.c                 |  9 +++++----
 6 files changed, 38 insertions(+), 31 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1bf11400ae99..5187dd88019b 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -229,7 +229,6 @@ struct x86_emulate_ctxt {
 	int exception; /* exception that happens during emulation or -1 */
 	u32 error_code; /* error code for exception */
 	bool error_code_valid;
-	unsigned long cr2; /* faulted address in case of #PF */
 
 	/* decode cache */
 	struct decode_cache decode;
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 009a4a1b370e..3fde5b322534 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -239,9 +239,7 @@ struct kvm_mmu {
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
 	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
-	void (*inject_page_fault)(struct kvm_vcpu *vcpu,
-				  unsigned long addr,
-				  u32 error_code);
+	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
 			    u32 *error);
@@ -288,6 +286,16 @@ struct kvm_vcpu_arch {
 	bool tpr_access_reporting;
 
 	struct kvm_mmu mmu;
+
+	/*
+	 * This struct is filled with the necessary information to propagate a
+	 * page fault into the guest
+	 */
+	struct {
+		u64 address;
+		unsigned error_code;
+	} fault;
+
 	/* only needed in kvm_pv_mmu_op() path, but it's hot so
 	 * put it here to avoid allocation */
 	struct kvm_pv_mmu_op_buffer mmu_op_buffer;
@@ -624,8 +632,7 @@ void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
 void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr);
 void kvm_requeue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
-void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long cr2,
-			   u32 error_code);
+void kvm_inject_page_fault(struct kvm_vcpu *vcpu);
 bool kvm_require_cpl(struct kvm_vcpu *vcpu, int required_cpl);
 
 int kvm_pic_set_irq(void *opaque, int irq, int level);
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 27d2c22b114e..2b08b78b6cab 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -487,11 +487,9 @@ static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 	emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
-		       int err)
+static void emulate_pf(struct x86_emulate_ctxt *ctxt)
 {
-	ctxt->cr2 = addr;
-	emulate_exception(ctxt, PF_VECTOR, err, true);
+	emulate_exception(ctxt, PF_VECTOR, 0, true);
 }
 
 static void emulate_ud(struct x86_emulate_ctxt *ctxt)
@@ -834,7 +832,7 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
 					ctxt->vcpu);
 		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt, addr, err);
+			emulate_pf(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -921,7 +919,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	addr = dt.address + index * 8;
 	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt, addr, err);
+		emulate_pf(ctxt);
 
 	return ret;
 }
@@ -947,7 +945,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	addr = dt.address + index * 8;
 	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		emulate_pf(ctxt, addr, err);
+		emulate_pf(ctxt);
 
 	return ret;
 }
@@ -1117,7 +1115,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 					&err,
 					ctxt->vcpu);
 		if (rc == X86EMUL_PROPAGATE_FAULT)
-			emulate_pf(ctxt, c->dst.addr.mem, err);
+			emulate_pf(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
@@ -1939,7 +1937,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, old_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -1949,7 +1947,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, old_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -1957,7 +1955,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, new_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -1970,7 +1968,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 				       ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt, new_tss_base, err);
+			emulate_pf(ctxt);
 			return ret;
 		}
 	}
@@ -2081,7 +2079,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, old_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -2091,7 +2089,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, old_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -2099,7 +2097,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		emulate_pf(ctxt, new_tss_base, err);
+		emulate_pf(ctxt);
 		return ret;
 	}
 
@@ -2112,7 +2110,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 				       ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			emulate_pf(ctxt, new_tss_base, err);
+			emulate_pf(ctxt);
 			return ret;
 		}
 	}
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 86f7557cf3fb..99367274b97c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2566,11 +2566,9 @@ static unsigned long get_cr3(struct kvm_vcpu *vcpu)
 	return vcpu->arch.cr3;
 }
 
-static void inject_page_fault(struct kvm_vcpu *vcpu,
-			      u64 addr,
-			      u32 err_code)
+static void inject_page_fault(struct kvm_vcpu *vcpu)
 {
-	vcpu->arch.mmu.inject_page_fault(vcpu, addr, err_code);
+	vcpu->arch.mmu.inject_page_fault(vcpu);
 }
 
 static void paging_free(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 68ee1b7fa89f..d07f48a06f09 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -258,6 +258,10 @@ error:
 		walker->error_code |= PFERR_FETCH_MASK;
 	if (rsvd_fault)
 		walker->error_code |= PFERR_RSVD_MASK;
+
+	vcpu->arch.fault.address = addr;
+	vcpu->arch.fault.error_code = walker->error_code;
+
 	trace_kvm_mmu_walker_error(walker->error_code);
 	return 0;
 }
@@ -521,7 +525,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu, addr, walker.error_code);
+		inject_page_fault(vcpu);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		return 0;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9d434777154d..48b74d2fbfb7 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -329,11 +329,12 @@ void kvm_requeue_exception(struct kvm_vcpu *vcpu, unsigned nr) | |||
329 | } | 329 | } |
330 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); | 330 | EXPORT_SYMBOL_GPL(kvm_requeue_exception); |
331 | 331 | ||
332 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu, unsigned long addr, | 332 | void kvm_inject_page_fault(struct kvm_vcpu *vcpu) |
333 | u32 error_code) | ||
334 | { | 333 | { |
334 | unsigned error_code = vcpu->arch.fault.error_code; | ||
335 | |||
335 | ++vcpu->stat.pf_guest; | 336 | ++vcpu->stat.pf_guest; |
336 | vcpu->arch.cr2 = addr; | 337 | vcpu->arch.cr2 = vcpu->arch.fault.address; |
337 | kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); | 338 | kvm_queue_exception_e(vcpu, PF_VECTOR, error_code); |
338 | } | 339 | } |
339 | 340 | ||
@@ -4080,7 +4081,7 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu) | |||
4080 | { | 4081 | { |
4081 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; | 4082 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; |
4082 | if (ctxt->exception == PF_VECTOR) | 4083 | if (ctxt->exception == PF_VECTOR) |
4083 | kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code); | 4084 | kvm_inject_page_fault(vcpu); |
4084 | else if (ctxt->error_code_valid) | 4085 | else if (ctxt->error_code_valid) |
4085 | kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code); | 4086 | kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code); |
4086 | else | 4087 | else |
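
Note: the net effect of this patch is that the faulting address and error code no longer travel down the call chain as parameters; the fault is recorded in vcpu->arch.fault where it is detected and read back at injection time. A minimal sketch of that producer/consumer flow, condensed from the hunks above (record_guest_fault is a made-up helper name used only for illustration; the kvm_inject_page_fault body mirrors the x86.c hunk):

/*
 * Illustrative sketch, not part of the patch.
 * record_guest_fault() is a hypothetical stand-in for the error path of
 * the guest page-table walker in paging_tmpl.h.
 */
static void record_guest_fault(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code)
{
	/* Producer side: stash the fault instead of passing it around. */
	vcpu->arch.fault.address = addr;
	vcpu->arch.fault.error_code = error_code;
}

void kvm_inject_page_fault(struct kvm_vcpu *vcpu)
{
	/* Consumer side: read the stashed fault and queue #PF for the guest. */
	unsigned error_code = vcpu->arch.fault.error_code;

	++vcpu->stat.pf_guest;
	vcpu->arch.cr2 = vcpu->arch.fault.address;
	kvm_queue_exception_e(vcpu, PF_VECTOR, error_code);
}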