author     Junaid Shahid <junaids@google.com>    2018-08-14 13:15:34 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>   2018-08-14 13:25:59 -0400
commit     28a1f3ac1d0c8558ee4453d9634dad891a6e922e (patch)
tree       c5dcce8e4448224ddc737ca8e1a8dc63bd6dff44
parent     fd8ca6dac9b45db8503cf508880edd63e039e2f2 (diff)
kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
Always set the 5 upper-most supported physical address bits to 1 for SPTEs
that are marked as non-present or reserved, to make them unusable for L1TF
attacks from the guest. Currently, this just applies to MMIO SPTEs. (We do
not need to mark PTEs that are completely 0 as physical page 0 is already
reserved.)

This allows mitigation of L1TF without disabling hyper-threading by using
shadow paging mode instead of EPT.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c  43
-rw-r--r--  arch/x86/kvm/x86.c   8
2 files changed, 44 insertions, 7 deletions
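
As a rough standalone sketch (not part of the patch), the program below walks through the encoding described in the commit message for a hypothetical CPU reporting 46 physical address bits: the L1TF guard mask covers bits 45:41, the GPA bits displaced by it are stashed 5 positions higher, and the decode path restores them. The 46-bit width, the example GFN, and the local names are assumptions for illustration only; the real mark_mmio_spte()/get_mmio_spte_gfn() additionally fold in the MMIO flag and generation masks, which this sketch omits and therefore clears the stash region explicitly.

/*
 * Standalone illustration (userspace C, not kernel code) of the
 * nonpresent/reserved-SPTE masking scheme, assuming a CPU with 46
 * physical address bits.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define MASK_LEN	5	/* mirrors shadow_nonpresent_or_rsvd_mask_len */

/* Set bits s..e inclusive, like the kernel's rsvd_bits() helper. */
static uint64_t rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}

int main(void)
{
	int phys_bits = 46;		/* assumed boot_cpu_data.x86_phys_bits */
	uint64_t l1tf_mask = 0;

	/* Only CPUs with fewer than 52 - 5 = 47 physical bits get a mask. */
	if (phys_bits < 52 - MASK_LEN)
		l1tf_mask = rsvd_bits(phys_bits - MASK_LEN, phys_bits - 1);
	assert(l1tf_mask == 0x3e0000000000ULL);	/* bits 45:41 */

	/* Encode: force the mask bits to 1, stash the displaced GPA bits. */
	uint64_t gfn  = 0x2abcdef12ULL;		/* arbitrary example GFN */
	uint64_t gpa  = gfn << PAGE_SHIFT;
	uint64_t spte = gpa | l1tf_mask;
	spte |= (gpa & l1tf_mask) << MASK_LEN;

	/* Decode: drop the mask and the stash, then restore the stashed bits. */
	uint64_t out = spte & ~(l1tf_mask | (l1tf_mask << MASK_LEN));
	out |= (spte >> MASK_LEN) & l1tf_mask;

	assert(out >> PAGE_SHIFT == gfn);
	printf("l1tf_mask = %#llx, GFN round-trips\n",
	       (unsigned long long)l1tf_mask);
	return 0;
}

The property the patch relies on is visible in the asserts: the five uppermost supported address bits are always 1 in such SPTEs, yet the original GFN remains recoverable.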
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f5aef52b148b..27c2ab079a76 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -238,6 +238,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
 						     PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -327,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
 	unsigned int gen = kvm_current_mmio_generation(vcpu);
 	u64 mask = generation_mmio_spte_mask(gen);
+	u64 gpa = gfn << PAGE_SHIFT;
 
 	access &= ACC_WRITE_MASK | ACC_USER_MASK;
-	mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+	mask |= shadow_mmio_value | access;
+	mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+	mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+		<< shadow_nonpresent_or_rsvd_mask_len;
 
 	trace_mark_mmio_spte(sptep, gfn, access, gen);
 	mmu_spte_set(sptep, mask);
@@ -342,8 +357,14 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-	return (spte & ~mask) >> PAGE_SHIFT;
+	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+		   shadow_nonpresent_or_rsvd_mask;
+	u64 gpa = spte & ~mask;
+
+	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+	       & shadow_nonpresent_or_rsvd_mask;
+
+	return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
@@ -400,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
@@ -410,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
 	shadow_mmio_mask = 0;
 	shadow_present_mask = 0;
 	shadow_acc_track_mask = 0;
+
+	/*
+	 * If the CPU has 46 or less physical address bits, then set an
+	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
+	 * assumed that the CPU is not vulnerable to L1TF.
+	 */
+	if (boot_cpu_data.x86_phys_bits <
+	    52 - shadow_nonpresent_or_rsvd_mask_len)
+		shadow_nonpresent_or_rsvd_mask =
+			rsvd_bits(boot_cpu_data.x86_phys_bits -
+				  shadow_nonpresent_or_rsvd_mask_len,
+				  boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
@@ -5819,7 +5852,7 @@ int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
 
-	kvm_mmu_clear_all_pte_masks();
+	kvm_mmu_reset_all_pte_masks();
 
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					sizeof(struct pte_list_desc),
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3c83711c0ebe..d294983ee1c0 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6536,8 +6536,12 @@ static void kvm_set_mmio_spte_mask(void)
 	 * Set the reserved bits and the present bit of an paging-structure
 	 * entry to generate page fault with PFER.RSV = 1.
 	 */
-	/* Mask the reserved physical address bits. */
-	mask = rsvd_bits(maxphyaddr, 51);
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
 
 	/* Set the present bit. */
 	mask |= 1ull;