aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLadi Prosek <lprosek@redhat.com>2017-10-05 05:10:23 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2017-10-10 09:31:28 -0400
commit829ee279aed43faa5cb1e4d65c0cad52f2426c53 (patch)
tree4d820edec657b7fa4c79e1877b87ff86d5c3aed9
parentfd19d3b45164466a4adce7cbff448ba9189e1427 (diff)
KVM: MMU: always terminate page walks at level 1
is_last_gpte() is not equivalent to the pseudo-code given in commit
6bb69c9b69c31 ("KVM: MMU: simplify last_pte_bitmap") because an incorrect
value of last_nonleaf_level may override the result even if level == 1.

It is critical for is_last_gpte() to return true on level == 1 to
terminate page walks. Otherwise memory corruption may occur as level
is used as an index to various data structures throughout the page
walking code. Even though the actual bug would be wherever the MMU is
initialized (as in the previous patch), be defensive and ensure here
that is_last_gpte() returns the correct value.

This patch is also enough to fix CVE-2017-12188.

Fixes: 6bb69c9b69c315200ddc2bc79aee14c0184cf5b2
Cc: stable@vger.kernel.org
Cc: Andy Honig <ahonig@google.com>
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
[Panic if walk_addr_generic gets an incorrect level; this is a serious
bug and it's not worth a WARN_ON where the recovery path might hide
further exploitable issues; suggested by Andrew Honig. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/mmu.c14
-rw-r--r--arch/x86/kvm/paging_tmpl.h3
2 files changed, 9 insertions, 8 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3c25f20115bc..7a69cf053711 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu,
 				unsigned level, unsigned gpte)
 {
 	/*
-	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
-	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
-	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
-	 */
-	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
-
-	/*
 	 * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
 	 * If it is clear, there are no large pages at this level, so clear
 	 * PT_PAGE_SIZE_MASK in gpte if that is the case.
 	 */
 	gpte &= level - mmu->last_nonleaf_level;
 
+	/*
+	 * PT_PAGE_TABLE_LEVEL always terminates.  The RHS has bit 7 set
+	 * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+	 * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+	 */
+	gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
+
 	return gpte & PT_PAGE_SIZE_MASK;
 }
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 86b68dc5a649..f18d1f8d332b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -334,10 +334,11 @@ retry_walk:
 		--walker->level;
 
 		index = PT_INDEX(addr, walker->level);
-
 		table_gfn = gpte_to_gfn(pte);
 		offset = index * sizeof(pt_element_t);
 		pte_gpa = gfn_to_gpa(table_gfn) + offset;
+
+		BUG_ON(walker->level < 1);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
 