author		Avi Kivity <avi@qumranet.com>	2007-01-05 19:36:44 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:25 -0500
commit		815af8d42ee3f844c0ceaf2104bd9c6a0bb1e26c (patch)
tree		cbb21a8efea0fb3fbd94689546b2f97e11dbf9d9 /drivers/kvm
parent		374cbac0333ddf5cf1c6637efaf7f3adcc67fd75 (diff)
[PATCH] KVM: MMU: Let the walker extract the target page gfn from the pte
This fixes a problem where set_pte_common() looked for shadowed pages based
on the page directory gfn (a huge page) instead of the actual gfn being
mapped.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
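The change hinges on how the gfn of the mapped page is derived from the entry the guest walk stops at: for a 4 KiB mapping it comes straight from the pte's base address, while for a large page it is the pde's base frame plus the page-table-level index of the faulting address. The following is a minimal standalone sketch of that computation, not the kernel code itself; the constants and pt_index() below are simplified stand-ins for the kernel's PT_BASE_ADDR_MASK, PT_DIR_BASE_ADDR_MASK and PT_INDEX macros, assuming 64-bit paging with 4 KiB pages and 2 MiB large pages.

/*
 * Illustrative sketch only: deriving the target gfn from the final
 * walk entry.  Masks and pt_index() are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT         12
#define PTE_BASE_ADDR_MASK 0x000ffffffffff000ULL  /* bits 12..51 */
#define PDE_BASE_ADDR_MASK 0x000fffffffe00000ULL  /* bits 21..51 */
#define PDE_PAGE_SIZE_BIT  (1ULL << 7)            /* PS bit in a PDE */

/* index of addr within a last-level page table (9 bits) */
static uint64_t pt_index(uint64_t addr)
{
	return (addr >> PAGE_SHIFT) & 0x1ff;
}

/* gfn of the page that addr maps to, given the entry the walk ended on */
static uint64_t entry_to_gfn(uint64_t entry, uint64_t addr, int is_pde)
{
	if (is_pde && (entry & PDE_PAGE_SIZE_BIT))
		/* large page: gfn of the 2 MiB region plus the 4 KiB
		 * sub-page selected by the low bits of addr */
		return ((entry & PDE_BASE_ADDR_MASK) >> PAGE_SHIFT)
			+ pt_index(addr);

	/* ordinary 4 KiB mapping */
	return (entry & PTE_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t pde = 0x40000000ULL | PDE_PAGE_SIZE_BIT; /* 2 MiB page at 1 GiB */
	uint64_t addr = 0x12345678ULL;

	/* prints gfn = 0x40145 */
	printf("gfn = 0x%llx\n",
	       (unsigned long long)entry_to_gfn(pde, addr, 1));
	return 0;
}

Carrying this gfn from the walker down into set_pte_common() is what lets the write-protect lookup use the frame actually being mapped rather than the huge-page directory frame.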
Diffstat (limited to 'drivers/kvm')
-rw-r--r--	drivers/kvm/mmu.c	7
-rw-r--r--	drivers/kvm/paging_tmpl.h	41
2 files changed, 31 insertions, 17 deletions
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index ba813f49f8aa..ceae25bfd4b5 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -752,7 +752,8 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 				  u64 *shadow_pte,
 				  gpa_t gaddr,
 				  int dirty,
-				  u64 access_bits)
+				  u64 access_bits,
+				  gfn_t gfn)
 {
 	hpa_t paddr;
 
@@ -779,10 +780,10 @@ static inline void set_pte_common(struct kvm_vcpu *vcpu,
 	if (access_bits & PT_WRITABLE_MASK) {
 		struct kvm_mmu_page *shadow;
 
-		shadow = kvm_mmu_lookup_page(vcpu, gaddr >> PAGE_SHIFT);
+		shadow = kvm_mmu_lookup_page(vcpu, gfn);
 		if (shadow) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
-				 __FUNCTION__, (gfn_t)(gaddr >> PAGE_SHIFT));
+				 __FUNCTION__, gfn);
 			access_bits &= ~PT_WRITABLE_MASK;
 			*shadow_pte &= ~PT_WRITABLE_MASK;
 		}
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index cd71973c780c..cf4b74cc75b5 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -62,6 +62,7 @@ struct guest_walker {
 	pt_element_t *table;
 	pt_element_t *ptep;
 	pt_element_t inherited_ar;
+	gfn_t gfn;
 };
 
 /*
@@ -113,12 +114,23 @@ static void FNAME(walk_addr)(struct guest_walker *walker,
 		if (is_present_pte(*ptep) && !(*ptep & PT_ACCESSED_MASK))
 			*ptep |= PT_ACCESSED_MASK;
 
-		if (!is_present_pte(*ptep) ||
-		    walker->level == PT_PAGE_TABLE_LEVEL ||
-		    (walker->level == PT_DIRECTORY_LEVEL &&
-		     (*ptep & PT_PAGE_SIZE_MASK) &&
-		     (PTTYPE == 64 || is_pse(vcpu))))
+		if (!is_present_pte(*ptep))
+			break;
+
+		if (walker->level == PT_PAGE_TABLE_LEVEL) {
+			walker->gfn = (*ptep & PT_BASE_ADDR_MASK)
+				>> PAGE_SHIFT;
+			break;
+		}
+
+		if (walker->level == PT_DIRECTORY_LEVEL
+		    && (*ptep & PT_PAGE_SIZE_MASK)
+		    && (PTTYPE == 64 || is_pse(vcpu))) {
+			walker->gfn = (*ptep & PT_DIR_BASE_ADDR_MASK)
+				>> PAGE_SHIFT;
+			walker->gfn += PT_INDEX(addr, PT_PAGE_TABLE_LEVEL);
 			break;
+		}
 
 		if (walker->level != 3 || is_long_mode(vcpu))
 			walker->inherited_ar &= walker->table[index];
@@ -143,30 +155,29 @@ static void FNAME(release_walker)(struct guest_walker *walker)
 }
 
 static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
-			   u64 *shadow_pte, u64 access_bits)
+			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {
 	ASSERT(*shadow_pte == 0);
 	access_bits &= guest_pte;
 	*shadow_pte = (guest_pte & PT_PTE_COPY_MASK);
 	set_pte_common(vcpu, shadow_pte, guest_pte & PT_BASE_ADDR_MASK,
-		       guest_pte & PT_DIRTY_MASK, access_bits);
+		       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
 }
 
 static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
-			   u64 *shadow_pte, u64 access_bits,
-			   int index)
+			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {
 	gpa_t gaddr;
 
 	ASSERT(*shadow_pte == 0);
 	access_bits &= guest_pde;
-	gaddr = (guest_pde & PT_DIR_BASE_ADDR_MASK) + PAGE_SIZE * index;
+	gaddr = (gpa_t)gfn << PAGE_SHIFT;
 	if (PTTYPE == 32 && is_cpuid_PSE36())
 		gaddr |= (guest_pde & PT32_DIR_PSE36_MASK) <<
 			(32 - PT32_DIR_PSE36_SHIFT);
 	*shadow_pte = guest_pde & PT_PTE_COPY_MASK;
 	set_pte_common(vcpu, shadow_pte, gaddr,
-		       guest_pde & PT_DIRTY_MASK, access_bits);
+		       guest_pde & PT_DIRTY_MASK, access_bits, gfn);
 }
 
 /*
@@ -214,10 +225,12 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 				*prev_shadow_ent |= PT_SHADOW_PS_MARK;
 			FNAME(set_pde)(vcpu, *guest_ent, shadow_ent,
 				       walker->inherited_ar,
-				       PT_INDEX(addr, PT_PAGE_TABLE_LEVEL));
+				       walker->gfn);
 		} else {
 			ASSERT(walker->level == PT_PAGE_TABLE_LEVEL);
-			FNAME(set_pte)(vcpu, *guest_ent, shadow_ent, walker->inherited_ar);
+			FNAME(set_pte)(vcpu, *guest_ent, shadow_ent,
+				       walker->inherited_ar,
+				       walker->gfn);
 		}
 		return shadow_ent;
 	}
@@ -291,7 +304,7 @@ static int FNAME(fix_write_pf)(struct kvm_vcpu *vcpu,
 		return 0;
 	}
 
-	gfn = (*guest_ent & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
+	gfn = walker->gfn;
 	if (kvm_mmu_lookup_page(vcpu, gfn)) {
 		pgprintk("%s: found shadow page for %lx, marking ro\n",
 			 __FUNCTION__, gfn);