 arch/x86/kvm/mmu.c         | 34 +++++++++++++++-------------------
 arch/x86/kvm/paging_tmpl.h | 17 ++++++++---------
 2 files changed, 23 insertions(+), 28 deletions(-)
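
The change replaces per-spte MMIO tagging with an up-front check on the page returned by gfn_to_page(). Previously, a fault on a gfn with no backing memslot installed a non-present shadow pte tagged with PT_SHADOW_IO_MARK, and the fault path re-tested that tag afterwards via is_io_pte(); now the error page is caught immediately, before any shadow pte is built. A condensed before/after sketch, pieced together from the hunks below:

        /* Before: tag the shadow pte, then test the tag after the fact. */
        set_shadow_pte(shadow_pte,
                       shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
        ...
        if (shadow_pte && is_io_pte(*shadow_pte))
                return 1;               /* hand the access to the emulator */

        /* After: detect MMIO as soon as the page is looked up. */
        page = gfn_to_page(vcpu->kvm, gfn);
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return 1;               /* hand the access to the emulator */
        }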
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6f8392d4034..6651dfadae5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -101,8 +101,6 @@ static int dbg = 1;
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52
 
-#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
-
 #define VALID_PAGE(x) ((x) != INVALID_PAGE)
 
 #define PT64_LEVEL_BITS 9
@@ -200,7 +198,6 @@ static int is_present_pte(unsigned long pte)
 
 static int is_shadow_present_pte(u64 pte)
 {
-        pte &= ~PT_SHADOW_IO_MARK;
         return pte != shadow_trap_nonpresent_pte
                 && pte != shadow_notrap_nonpresent_pte;
 }
@@ -215,11 +212,6 @@ static int is_dirty_pte(unsigned long pte)
         return pte & PT_DIRTY_MASK;
 }
 
-static int is_io_pte(unsigned long pte)
-{
-        return pte & PT_SHADOW_IO_MARK;
-}
-
 static int is_rmap_pte(u64 pte)
 {
         return is_shadow_present_pte(pte);
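
is_error_page() itself is not part of this diff. In kernels of this vintage it is, to the best of my reading, a plain identity test against the global bad_page sentinel that gfn_to_page() returns when no memslot covers the requested gfn, roughly:

        /* Sketch of the assumed helper (cf. virt/kvm/kvm_main.c of the same
         * era); the exact body may differ, but the contract is that
         * gfn_to_page() never fails outright, it returns a sentinel page. */
        int is_error_page(struct page *page)
        {
                return page == bad_page;
        }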
@@ -538,7 +530,7 @@ static int is_empty_shadow_page(u64 *spt)
         u64 *end;
 
         for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
-                if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
+                if (*pos != shadow_trap_nonpresent_pte) {
                         printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                                pos, *pos);
                         return 0;
@@ -926,13 +918,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
         if (pte_access & ACC_USER_MASK)
                 spte |= PT_USER_MASK;
 
-        if (is_error_page(page)) {
-                set_shadow_pte(shadow_pte,
-                               shadow_trap_nonpresent_pte | PT_SHADOW_IO_MARK);
-                kvm_release_page_clean(page);
-                return;
-        }
-
         spte |= page_to_phys(page);
 
         if ((pte_access & ACC_WRITE_MASK)
@@ -1002,7 +987,7 @@ static int __nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write,
                 if (level == 1) {
                         mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
                                      0, write, 1, &pt_write, gfn, page);
-                        return pt_write || is_io_pte(table[index]);
+                        return pt_write;
                 }
 
                 if (table[index] == shadow_trap_nonpresent_pte) {
@@ -1039,6 +1024,13 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
         page = gfn_to_page(vcpu->kvm, gfn);
         up_read(&current->mm->mmap_sem);
 
+        /* mmio */
+        if (is_error_page(page)) {
+                kvm_release_page_clean(page);
+                up_read(&vcpu->kvm->slots_lock);
+                return 1;
+        }
+
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
         r = __nonpaging_map(vcpu, v, write, gfn, page);
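
Note the unbalanced up_read(&vcpu->kvm->slots_lock) in the added branch: slots_lock is taken earlier in the fault path, outside this hunk, so every early return must drop it. Returning 1 tells the generic fault handler that the access hit MMIO and needs emulation. The caller-side contract looks roughly like this (a simplified sketch of kvm_mmu_page_fault() from the same era; details assumed):

        r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
        if (r < 0)
                return r;       /* hard error */
        if (!r)
                return 1;       /* shadow pte fixed up, re-enter the guest */
        /* r == 1: the access hit MMIO, let the emulator handle it */
        er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);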
@@ -1406,10 +1398,14 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                 return;
         gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
-        down_read(&current->mm->mmap_sem);
+        down_read(&vcpu->kvm->slots_lock);
         page = gfn_to_page(vcpu->kvm, gfn);
-        up_read(&current->mm->mmap_sem);
+        up_read(&vcpu->kvm->slots_lock);
 
+        if (is_error_page(page)) {
+                kvm_release_page_clean(page);
+                return;
+        }
         vcpu->arch.update_pte.gfn = gfn;
         vcpu->arch.update_pte.page = page;
 }
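
mmu_guess_page_from_pte_write() now takes slots_lock, the lock that actually guards the memslot array, instead of mmap_sem, and refuses to cache an error page. This producer-side filter is what makes it safe to drop the is_error_page() check from mmu_set_spte() above: the cached page is presumably handed straight to mmu_set_spte() by the pte-write path, which after this patch assumes its page argument is real RAM. The invariant, restated:

        /* Every store into vcpu->arch.update_pte.page has already rejected
         * error pages, so downstream consumers need no check of their own. */
        if (is_error_page(page)) {
                kvm_release_page_clean(page);
                return;         /* leave update_pte untouched */
        }
        vcpu->arch.update_pte.gfn = gfn;
        vcpu->arch.update_pte.page = page;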
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c2fd2b96144..4b55f462e2b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -399,6 +399,14 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         page = gfn_to_page(vcpu->kvm, walker.gfn);
         up_read(&current->mm->mmap_sem);
 
+        /* mmio */
+        if (is_error_page(page)) {
+                pgprintk("gfn %x is mmio\n", walker.gfn);
+                kvm_release_page_clean(page);
+                up_read(&vcpu->kvm->slots_lock);
+                return 1;
+        }
+
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
         shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
@@ -409,15 +417,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
         if (!write_pt)
                 vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 
-        /*
-         * mmio: emulate if accessible, otherwise its a guest fault.
-         */
-        if (shadow_pte && is_io_pte(*shadow_pte)) {
-                spin_unlock(&vcpu->kvm->mmu_lock);
-                up_read(&vcpu->kvm->slots_lock);
-                return 1;
-        }
-
         ++vcpu->stat.pf_fixed;
         kvm_mmu_audit(vcpu, "post page fault (fixed)");
         spin_unlock(&vcpu->kvm->mmu_lock);
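
With the test hoisted to the top of FNAME(page_fault), an MMIO access no longer reaches FNAME(fetch) at all, so no shadow page table levels are allocated for it and the mmu_lock critical section stays MMIO-free. The resulting control flow, condensed (helper names as in the diff, glue elided):

        static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, ...)
        {
                /* ...walk the guest page tables into `walker`... */
                page = gfn_to_page(vcpu->kvm, walker.gfn);
                if (is_error_page(page)) {              /* mmio */
                        kvm_release_page_clean(page);
                        up_read(&vcpu->kvm->slots_lock);
                        return 1;                       /* emulate */
                }
                spin_lock(&vcpu->kvm->mmu_lock);
                /* ...FNAME(fetch) installs the shadow pte, sets write_pt... */
                spin_unlock(&vcpu->kvm->mmu_lock);
                up_read(&vcpu->kvm->slots_lock);
                return write_pt;
        }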