author	Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>	2011-05-01 01:33:07 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-22 08:48:14 -0400
commit	c8cfbb555eb3632bf3dcbe1a591c1f4d0c28681c (patch)
tree	4d7b9e26a8f1eee7c598bb132a6cd24e3aa4b211 /arch
parent	85722cda308c0ad7390dc910139b2ce58c11b9c4 (diff)
KVM: MMU: Use ptep_user for cmpxchg_gpte()
The address of the gpte was already calculated and stored in ptep_user
before entering cmpxchg_gpte(). This patch makes cmpxchg_gpte() use that
address, to make it clear that we are operating on the same address
throughout walk_addr_generic().

Note that the unlikely annotations are used to show that the conditions
are something unusual, rather than for performance.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
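[Editor's note] A minimal sketch of the pin/map/cmpxchg pattern that the
new cmpxchg_gpte() follows, for readers unfamiliar with it. This is not
KVM code: the helper name demo_cmpxchg_user_pte() and the put_page()
cleanup are illustrative assumptions (the patched function handles page
release in surrounding code not shown in this diff).

#include <linux/mm.h>
#include <linux/highmem.h>

/*
 * Hypothetical helper, for illustration only.
 * Returns 0 if *ptep_user was swapped from old to new, 1 if another
 * writer got there first, -EFAULT if the page could not be pinned.
 */
static int demo_cmpxchg_user_pte(unsigned long __user *ptep_user,
				 unsigned long old, unsigned long new)
{
	struct page *page;
	char *table;
	unsigned long *entry, ret;
	int npages;

	/* Pin exactly one writable page backing the user address. */
	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
	if (unlikely(npages != 1))
		return -EFAULT;

	/* Map the page and locate the entry inside it. */
	table = kmap_atomic(page, KM_USER0);
	entry = (unsigned long *)(table + offset_in_page(ptep_user));
	ret = cmpxchg(entry, old, new);
	kunmap_atomic(table, KM_USER0);

	/* Drop the reference taken by get_user_pages_fast(). */
	put_page(page);

	return ret != old;
}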
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h | 26 ++++++++++++--------------
1 file changed, 12 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e3f81418797e..6c4dc010c4cb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -79,21 +79,19 @@ static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)
 }
 
 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-			       gfn_t table_gfn, unsigned index,
+			       pt_element_t __user *ptep_user, unsigned index,
 			       pt_element_t orig_pte, pt_element_t new_pte)
 {
+	int npages;
 	pt_element_t ret;
 	pt_element_t *table;
 	struct page *page;
-	gpa_t gpa;
 
-	gpa = mmu->translate_gpa(vcpu, table_gfn << PAGE_SHIFT,
-				 PFERR_USER_MASK|PFERR_WRITE_MASK);
-	if (gpa == UNMAPPED_GVA)
+	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+	/* Check if the user is doing something meaningless. */
+	if (unlikely(npages != 1))
 		return -EFAULT;
 
-	page = gfn_to_page(vcpu->kvm, gpa_to_gfn(gpa));
-
 	table = kmap_atomic(page, KM_USER0);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
 	kunmap_atomic(table, KM_USER0);
@@ -220,9 +218,9 @@ walk:
 			int ret;
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn,
-						  index, pte, pte|PT_ACCESSED_MASK);
-			if (ret < 0) {
+			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+						  pte, pte|PT_ACCESSED_MASK);
+			if (unlikely(ret < 0)) {
 				present = false;
 				break;
 			} else if (ret)
@@ -279,9 +277,9 @@ walk:
 			int ret;
 
 			trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
-			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, table_gfn, index, pte,
-						  pte|PT_DIRTY_MASK);
-			if (ret < 0) {
+			ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index,
+						  pte, pte|PT_DIRTY_MASK);
+			if (unlikely(ret < 0)) {
 				present = false;
 				goto error;
 			} else if (ret)
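[Editor's note] Taken together, the three hunks replace the gpa round
trip in cmpxchg_gpte() (mmu->translate_gpa() followed by gfn_to_page())
with a single get_user_pages_fast() on the ptep_user address that
walk_addr_generic() had already computed, and annotate the failure
branches at both call sites with unlikely() to flag them as unusual
conditions rather than as a performance hint.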