author     Avi Kivity <avi@qumranet.com>             2007-01-05 19:36:41 -0500
committer  Linus Torvalds <torvalds@woody.osdl.org>  2007-01-06 02:55:24 -0500
commit     1b0973bd8f788178f21d9eebdd879203464f8528 (patch)
tree       265524c2c66575cf6c64fd81d1a86611aa881040 /drivers/kvm
parent     17ac10ad2bb7d8c4f401668484b2e661a15726c6 (diff)
[PATCH] KVM: MMU: Use the guest pdptrs instead of mapping cr3 in pae mode
This lets us avoid write-protecting a partial page, and it also matches what a
real processor does.
Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
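Background, as a standalone sketch (illustrative C, not kernel code; the helper
names are hypothetical): in PAE mode CR3 points at a page-directory-pointer
table of only four 64-bit entries, i.e. 32 bytes, so shadowing it would mean
write-protecting a fraction of a page. The processor instead caches the four
PDPTEs when CR3 is loaded, which is what the vcpu->pdptrs[] array used by this
patch mirrors.

/*
 * Illustrative sketch only -- not kernel code.  The PAE PDPT holds four
 * 64-bit entries (32 bytes); bits 31:30 of a guest virtual address select
 * which cached PDPTE a page walk starts from.
 */
#include <stdint.h>

#define PT_PRESENT_MASK (1ULL << 0)

/* Hypothetical helper mirroring the walker's index computation. */
static inline unsigned pae_pdpte_index(uint32_t vaddr)
{
	return (vaddr >> 30) & 3;	/* bits 31:30 pick one of four PDPTEs */
}

static inline int pae_pdpte_present(const uint64_t pdptrs[4], uint32_t vaddr)
{
	return (pdptrs[pae_pdpte_index(vaddr)] & PT_PRESENT_MASK) != 0;
}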
Diffstat (limited to 'drivers/kvm')
 -rw-r--r--  drivers/kvm/kvm_main.c    |  2
 -rw-r--r--  drivers/kvm/paging_tmpl.h | 28
 2 files changed, 20 insertions, 10 deletions
diff --git a/drivers/kvm/kvm_main.c b/drivers/kvm/kvm_main.c
index 4512d8c39c84..68e121eeccbc 100644
--- a/drivers/kvm/kvm_main.c
+++ b/drivers/kvm/kvm_main.c
@@ -1491,6 +1491,8 @@ static int kvm_dev_ioctl_set_sregs(struct kvm *kvm, struct kvm_sregs *sregs)
 
 	mmu_reset_needed |= vcpu->cr4 != sregs->cr4;
 	kvm_arch_ops->set_cr4(vcpu, sregs->cr4);
+	if (!is_long_mode(vcpu) && is_pae(vcpu))
+		load_pdptrs(vcpu, vcpu->cr3);
 
 	if (mmu_reset_needed)
 		kvm_mmu_reset_context(vcpu);
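The hunk above reloads the cached PDPTRs whenever userspace replaces the
control registers while the guest is in PAE (non-long) mode, since the page
walker below now trusts vcpu->pdptrs[] instead of re-reading the guest PDPT
through a mapping of cr3. A conceptual sketch of such a refresh, assuming a
flat guest_ram array and toy types (this is not the kernel's load_pdptrs()
implementation):

#include <stdint.h>
#include <string.h>

struct toy_vcpu {
	uint64_t cr3;
	uint64_t pdptrs[4];	/* cached guest PDPTEs */
};

/*
 * In PAE mode CR3 bits 31:5 give the 32-byte-aligned physical base of the
 * PDPT, so refreshing the cache is a 4 * 8 byte copy from guest memory.
 */
static void refresh_pdptrs(struct toy_vcpu *vcpu, const uint8_t *guest_ram)
{
	uint64_t pdpt_base = vcpu->cr3 & ~0x1fULL;

	memcpy(vcpu->pdptrs, guest_ram + pdpt_base, sizeof(vcpu->pdptrs));
}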
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index 963d80e2271f..3ade9445ab23 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -67,18 +67,28 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 	hpa_t hpa;
 	struct kvm_memory_slot *slot;
 	pt_element_t *ptep;
+	pt_element_t root;
 
 	walker->level = vcpu->mmu.root_level;
-	walker->table_gfn = (vcpu->cr3 & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+	walker->table = NULL;
+	root = vcpu->cr3;
+#if PTTYPE == 64
+	if (!is_long_mode(vcpu)) {
+		walker->ptep = &vcpu->pdptrs[(addr >> 30) & 3];
+		root = *walker->ptep;
+		if (!(root & PT_PRESENT_MASK))
+			return;
+		--walker->level;
+	}
+#endif
+	walker->table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 	slot = gfn_to_memslot(vcpu->kvm, walker->table_gfn);
-	hpa = safe_gpa_to_hpa(vcpu, vcpu->cr3 & PT64_BASE_ADDR_MASK);
+	hpa = safe_gpa_to_hpa(vcpu, root & PT64_BASE_ADDR_MASK);
 	walker->table = kmap_atomic(pfn_to_page(hpa >> PAGE_SHIFT), KM_USER0);
 
 	ASSERT((!is_long_mode(vcpu) && is_pae(vcpu)) ||
 	       (vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) == 0);
 
-	walker->table = (pt_element_t *)( (unsigned long)walker->table |
-		(unsigned long)(vcpu->cr3 & ~(PAGE_MASK | CR3_FLAGS_MASK)) );
 	walker->inherited_ar = PT_USER_MASK | PT_WRITABLE_MASK;
 
 	for (;;) {
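The hunk above is the core of the patch: when the template is built for
64-bit entries (PTTYPE == 64) but the guest is not in long mode, i.e. PAE, the
walk no longer maps the PDPT through cr3. It starts from the cached PDPTE
selected by address bits 31:30, drops one paging level, and bails out early
(with walker->table still NULL) if that PDPTE is not present. A minimal
standalone sketch of that setup logic, using hypothetical names and simplified
types:

#include <stdbool.h>
#include <stdint.h>

#define PT_PRESENT_MASK      (1ULL << 0)
#define PT64_BASE_ADDR_MASK  0x000ffffffffff000ULL
#define PAGE_SHIFT           12

struct walk_start {
	unsigned level;		/* paging level the walk loop starts at */
	uint64_t table_gfn;	/* guest frame number of the first table */
};

static bool walk_setup(bool long_mode, unsigned root_level, uint64_t cr3,
		       const uint64_t pdptrs[4], uint32_t addr,
		       struct walk_start *out)
{
	uint64_t root = cr3;
	unsigned level = root_level;

	if (!long_mode) {			/* PAE: use the cached PDPTE */
		root = pdptrs[(addr >> 30) & 3];
		if (!(root & PT_PRESENT_MASK))
			return false;		/* walk gives up early */
		--level;			/* loop starts below the PDPT */
	}

	out->level = level;
	out->table_gfn = (root & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
	return true;
}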
@@ -89,11 +99,8 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 		ASSERT(((unsigned long)walker->table & PAGE_MASK) ==
 		       ((unsigned long)ptep & PAGE_MASK));
 
-		/* Don't set accessed bit on PAE PDPTRs */
-		if (vcpu->mmu.root_level != 3 || walker->level != 3)
-			if ((*ptep & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
-			    == PT_PRESENT_MASK)
-				*ptep |= PT_ACCESSED_MASK;
+		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
+			*ptep |= PT_ACCESSED_MASK;
 
 		if (!is_present_pte(*ptep) ||
 		    walker->level == PT_PAGE_TABLE_LEVEL ||
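With the PDPT handled before the loop, the walker never visits a PAE PDPTE
here, so the root_level/level guard can go and the accessed-bit update reduces
to "present and not yet accessed" (assuming is_present_pte() simply tests the
present bit). The rewritten condition is equivalent to the old mask
comparison, which a small standalone check confirms:

/* Standalone check, not kernel code: the two forms agree for all entries. */
#include <assert.h>
#include <stdint.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_ACCESSED_MASK (1ULL << 5)

int main(void)
{
	/* 0..63 covers every combination of the two bits being tested. */
	for (uint64_t pte = 0; pte < 64; pte++) {
		int old_form = (pte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
			       == PT_PRESENT_MASK;
		int new_form = (pte & PT_PRESENT_MASK)
			       && !(pte & PT_ACCESSED_MASK);
		assert(old_form == new_form);
	}
	return 0;
}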
@@ -116,7 +123,8 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 
 static void FNAME(release_walker)(struct guest_walker *walker)
 {
-	kunmap_atomic(walker->table, KM_USER0);
+	if (walker->table)
+		kunmap_atomic(walker->table, KM_USER0);
 }
 
 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
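The final hunk pairs with the early return added to walk_addr: the walker can
now finish without ever mapping a table (non-present PDPTE), so release only
unmaps when a mapping exists. A sketch of the pattern with placeholder
map/unmap helpers standing in for kmap_atomic()/kunmap_atomic():

#include <stddef.h>

/* Placeholder stand-ins for the atomic kmap helpers. */
static char page_storage[4096];
static void *map_page(void) { return page_storage; }
static void unmap_page(void *p) { (void)p; }

struct toy_walker {
	void *table;		/* NULL until a table is actually mapped */
};

static void toy_walk(struct toy_walker *w, int pdpte_present)
{
	w->table = NULL;	/* set before any early return */
	if (!pdpte_present)
		return;		/* nothing mapped; release must cope */
	w->table = map_page();
}

static void toy_release(struct toy_walker *w)
{
	if (w->table)		/* only unmap what was mapped */
		unmap_page(w->table);
}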