author    Roedel, Joerg <Joerg.Roedel@amd.com>    2011-04-20 09:33:16 -0400
committer Avi Kivity <avi@redhat.com>             2011-05-22 08:39:26 -0400
commit    a78484c60e35555d6e0e5b1eb83d4913621c59fb
tree      ca0c7ec1e95aa3b6087ae0191a19fa1e0d4ca6b0 /arch/x86/kvm
parent    13db70eca62c5bbb2cbbf6b23dadb94065d363d1
KVM: MMU: Make cmpxchg_gpte aware of nesting too
This patch makes the cmpxchg_gpte() function aware of the difference between
l1-gfns and l2-gfns when nested virtualization is in use. This fixes a
potential data-corruption problem in the l1-guest and makes the code work
correctly (at least as correctly as the hardware that is emulated in this
code) again.

Cc: stable@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
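In rough terms (a sketch of the before/after flow, not the literal patch text;
the exact change is in the first hunk below), the accessed/dirty update path
stops treating the guest page-table gfn as an l1-gfn and translates it through
the nested MMU first:

	/* before: table_gfn is mapped directly, so under nested paging an
	 * l2-gfn is misused as an l1-gfn and the cmpxchg can corrupt the
	 * wrong l1 page */
	page = gfn_to_page(kvm, table_gfn);

	/* after: translate the (possibly nested) gfn to an l1 gpa first and
	 * fail with -EFAULT if the l1 mapping does not exist */
	gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
				 PFERR_USER_MASK|PFERR_WRITE_MASK);
	if (gpa == UNMAPPED_GVA)
		return -EFAULT;
	page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));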
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	| 30 +++++++++++++++++++++++++++-------
1 file changed, 23 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 74f8567d57ac..1b6899088f97 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -78,15 +78,21 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 	return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
 }
 
-static bool FNAME(cmpxchg_gpte)(struct kvm *kvm,
+static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 			 gfn_t table_gfn, unsigned index,
 			 pt_element_t orig_pte, pt_element_t new_pte)
 {
 	pt_element_t ret;
 	pt_element_t *table;
 	struct page *page;
+	gpa_t gpa;
 
-	page = gfn_to_page(kvm, table_gfn);
+	gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
+				 PFERR_USER_MASK|PFERR_WRITE_MASK);
+	if (gpa == UNMAPPED_GVA)
+		return -EFAULT;
+
+	page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
 
 	table = kmap_atomic(page, KM_USER0);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
@@ -192,11 +198,17 @@ walk:
 #endif
 
 		if (!eperm && !rsvd_fault && !(pte & PT_ACCESSED_MASK)) {
+			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
-			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
-			    index, pte, pte|PT_ACCESSED_MASK))
+			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
+				  index, pte, pte|PT_ACCESSED_MASK);
+			if (ret < 0) {
+				present = false;
+				break;
+			} else if (ret)
 				goto walk;
+
 			mark_page_dirty(vcpu->kvm, table_gfn);
 			pte |= PT_ACCESSED_MASK;
 		}
@@ -245,13 +257,17 @@ walk:
 		goto error;
 
 	if (write_fault && !is_dirty_gpte(pte)) {
-		bool ret;
+		int ret;
 
 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
+		ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
 			    pte|PT_DIRTY_MASK);
-		if (ret)
+		if (ret < 0) {
+			present = false;
+			goto error;
+		} else if (ret)
 			goto walk;
+
 		mark_page_dirty(vcpu->kvm, table_gfn);
 		pte |= PT_DIRTY_MASK;
 		walker->ptes[walker->level - 1] = pte;
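For orientation, the return contract that the two walker hunks above rely on
can be summarized as follows (an illustrative fragment, not additional patch
content; new_pte stands for pte|PT_ACCESSED_MASK or pte|PT_DIRTY_MASK
depending on the call site):

	ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte, new_pte);
	if (ret < 0) {
		/* -EFAULT: the gpte's table page has no l1 mapping, so treat
		 * the entry as not present (break / goto error above) */
		present = false;
	} else if (ret) {
		/* the cmpxchg found a changed gpte: another update raced
		 * with us, so restart the guest page-table walk */
		goto walk;
	}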