 arch/x86/kvm/mmu.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d7e9bce6ff61..51b953ad9d4e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
  */
 static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
 
+/*
+ * In some cases, we need to preserve the GFN of a non-present or reserved
+ * SPTE when we usurp the upper five bits of the physical address space to
+ * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
+ * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
+ * left into the reserved bits, i.e. the GFN in the SPTE will be split into
+ * high and low parts.  This mask covers the lower bits of the GFN.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;
+
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
-		   shadow_nonpresent_or_rsvd_mask;
-	u64 gpa = spte & ~mask;
+	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;
 
 	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
 	       & shadow_nonpresent_or_rsvd_mask;
@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
 static void kvm_mmu_reset_all_pte_masks(void)
 {
+	u8 low_phys_bits;
+
 	shadow_user_mask = 0;
 	shadow_accessed_mask = 0;
 	shadow_dirty_mask = 0;
@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
 	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
 	 * assumed that the CPU is not vulnerable to L1TF.
 	 */
+	low_phys_bits = boot_cpu_data.x86_phys_bits;
 	if (boot_cpu_data.x86_phys_bits <
-	    52 - shadow_nonpresent_or_rsvd_mask_len)
+	    52 - shadow_nonpresent_or_rsvd_mask_len) {
 		shadow_nonpresent_or_rsvd_mask =
 			rsvd_bits(boot_cpu_data.x86_phys_bits -
 				  shadow_nonpresent_or_rsvd_mask_len,
 				  boot_cpu_data.x86_phys_bits - 1);
+		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
+	}
+	shadow_nonpresent_or_rsvd_lower_gfn_mask =
+		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
 }
 
 static int is_cpuid_PSE36(void)
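
To illustrate the mask arithmetic in the patch, below is a minimal stand-alone sketch of how a GFN that overlaps shadow_nonpresent_or_rsvd_mask gets split on encode and reassembled on decode. It is not kernel code: PHYS_BITS = 46 is a hypothetical physical address width (the kernel reads boot_cpu_data.x86_phys_bits at runtime), the bit-range helpers reimplement GENMASK_ULL()/rsvd_bits() locally, and the generation and shadow_mmio_value bits that a real MMIO SPTE also carries are omitted.

/*
 * Stand-alone illustration (not kernel code) of the GFN split/reassembly.
 * Build with: cc -Wall -o gfn_split gfn_split.c
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_BITS	46	/* hypothetical CPU physical address width */
#define RSVD_LEN	5	/* shadow_nonpresent_or_rsvd_mask_len */

/* Local stand-ins for the kernel's GENMASK_ULL() and rsvd_bits(). */
#define GENMASK_ULL(h, l)	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
#define RSVD_BITS(s, e)		GENMASK_ULL(e, s)

int main(void)
{
	/* Mirrors kvm_mmu_reset_all_pte_masks() when PHYS_BITS < 52 - RSVD_LEN. */
	uint64_t rsvd_mask = RSVD_BITS(PHYS_BITS - RSVD_LEN, PHYS_BITS - 1);
	uint64_t lower_gfn_mask = GENMASK_ULL(PHYS_BITS - RSVD_LEN - 1, PAGE_SHIFT);

	/* A guest physical address whose GFN overlaps the reserved region. */
	uint64_t gpa = 0x3e0000345000ULL;

	/* Encode: keep the low GFN bits, shift the overlapping bits left. */
	uint64_t spte = (gpa & lower_gfn_mask) | ((gpa & rsvd_mask) << RSVD_LEN);

	/* Decode, as in the new get_mmio_spte_gfn() (minus the final >> PAGE_SHIFT). */
	uint64_t decoded = (spte & lower_gfn_mask) |
			   ((spte >> RSVD_LEN) & rsvd_mask);

	printf("gpa=%#llx decoded=%#llx\n",
	       (unsigned long long)gpa, (unsigned long long)decoded);
	return decoded == gpa ? 0 : 1;
}

The split exists because, on CPUs with fewer than 52 - 5 physical address bits, the top bits of the physical address space are repurposed as reserved bits to mitigate L1TF; a GFN whose high bits land in that range must be relocated above it when stored in a non-present SPTE and shifted back down when read out, which is exactly what the decode in get_mmio_spte_gfn() now does via shadow_nonpresent_or_rsvd_lower_gfn_mask.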