diff options
author | Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> | 2011-05-07 03:31:36 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-05-22 08:47:54 -0400 |
commit | 12cb814f3bb35736420cc6bfc9fed7b6a9d3a828 (patch) | |
tree | 86371ff3cc2a2e9e08211cd20c464b8efbabb1d6 /arch | |
parent | 5ce941ee4258b836cf818d2ac159d8cf3ebad648 (diff) |
KVM: MMU: Clean up gpte reading with copy_from_user()
When we optimized walk_addr_generic() by not using the generic guest
memory reader, we replaced copy_from_user() with get_user():
commit e30d2a170506830d5eef5e9d7990c5aedf1b0a51
KVM: MMU: Optimize guest page table walk
commit 15e2ac9a43d4d7d08088e404fddf2533a8e7d52e
KVM: MMU: Fix 64-bit paging breakage on x86_32
But as Andi pointed out later, copy_from_user() behaves the same as
get_user() as long as we pass it a constant size.
So we use copy_from_user() to clean up the code.
The only noticeable regression introduced by this is 64-bit gpte
reading on x86_32 hosts, which is needed for PAE guests.
But this can be mitigated by implementing 8-byte get_user() for x86_32,
if needed.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 16 |
1 files changed, 1 insertions, 15 deletions
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 52450a6b784f..88ca456ccd68 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -115,20 +115,6 @@ static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte) | |||
115 | return access; | 115 | return access; |
116 | } | 116 | } |
117 | 117 | ||
118 | static int FNAME(read_gpte)(pt_element_t *pte, pt_element_t __user *ptep_user) | ||
119 | { | ||
120 | #if defined(CONFIG_X86_32) && (PTTYPE == 64) | ||
121 | u32 *p = (u32 *)pte; | ||
122 | u32 __user *p_user = (u32 __user *)ptep_user; | ||
123 | |||
124 | if (unlikely(get_user(*p, p_user))) | ||
125 | return -EFAULT; | ||
126 | return get_user(*(p + 1), p_user + 1); | ||
127 | #else | ||
128 | return get_user(*pte, ptep_user); | ||
129 | #endif | ||
130 | } | ||
131 | |||
132 | /* | 118 | /* |
133 | * Fetch a guest pte for a guest virtual address | 119 | * Fetch a guest pte for a guest virtual address |
134 | */ | 120 | */ |
@@ -199,7 +185,7 @@ walk: | |||
199 | } | 185 | } |
200 | 186 | ||
201 | ptep_user = (pt_element_t __user *)((void *)host_addr + offset); | 187 | ptep_user = (pt_element_t __user *)((void *)host_addr + offset); |
202 | if (unlikely(FNAME(read_gpte)(&pte, ptep_user))) { | 188 | if (unlikely(copy_from_user(&pte, ptep_user, sizeof(pte)))) { |
203 | present = false; | 189 | present = false; |
204 | break; | 190 | break; |
205 | } | 191 | } |