Diffstat (limited to 'arch/x86/kvm/mtrr.c')
-rw-r--r--  arch/x86/kvm/mtrr.c  25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index 9e8bf13572e6..3f8c732117ec 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
         return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 }
 
-static u8 mtrr_disabled_type(void)
+static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 {
         /*
          * Intel SDM 11.11.2.2: all MTRRs are disabled when
          * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
          * memory type is applied to all of physical memory.
+         *
+         * However, virtual machines can be run with CPUID such that
+         * there are no MTRRs. In that case, the firmware will never
+         * enable MTRRs and it is obviously undesirable to run the
+         * guest entirely with UC memory and we use WB.
          */
-        return MTRR_TYPE_UNCACHABLE;
+        if (guest_cpuid_has_mtrr(vcpu))
+                return MTRR_TYPE_UNCACHABLE;
+        else
+                return MTRR_TYPE_WRBACK;
 }
 
 /*
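The guest_cpuid_has_mtrr() helper called above is not defined in this file; it would live with the other CPUID helpers (arch/x86/kvm/cpuid.h). A minimal sketch of such a helper, assuming the existing kvm_find_cpuid_entry() lookup and that MTRR support is advertised in CPUID leaf 1, EDX bit 12 (X86_FEATURE_MTRR):

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        /* CPUID.01H:EDX[12] is the architectural MTRR feature flag. */
        best = kvm_find_cpuid_entry(vcpu, 1, 0);
        return best && (best->edx & bit(X86_FEATURE_MTRR));
}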
@@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)
 
         for (seg = 0; seg < seg_num; seg++) {
                 mtrr_seg = &fixed_seg_table[seg];
-                if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
+                if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
                         return seg;
         }
 
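The old condition (mtrr_seg->start >= addr) could only hold at the very first address of a segment, so most addresses either missed their segment or fell through to a later one; the fix is the standard half-open interval test. A standalone sketch of the corrected lookup, using the architectural fixed-range MTRR layout (bounds from the MTRR architecture, not from this diff):

#include <stdint.h>
#include <stdio.h>

/* Fixed-range MTRR segments: start inclusive, end exclusive. */
static const struct { uint64_t start, end; } seg_table[] = {
        { 0x00000, 0x80000  },  /* 64K ranges */
        { 0x80000, 0xc0000  },  /* 16K ranges */
        { 0xc0000, 0x100000 },  /* 4K ranges  */
};

static int addr_to_seg(uint64_t addr)
{
        for (int seg = 0; seg < 3; seg++)
                /* corrected test: addr in [start, end) */
                if (seg_table[seg].start <= addr && addr < seg_table[seg].end)
                        return seg;
        return -1;
}

int main(void)
{
        /* 0x1000 lies in segment 0; the old ">=" test skipped
         * segment 0 and wrongly matched segment 1. */
        printf("%d %d %d\n", addr_to_seg(0x1000),   /* 0  */
               addr_to_seg(0x90000),                /* 1  */
               addr_to_seg(0x100000));              /* -1 */
        return 0;
}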
@@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
         *start = range->base & PAGE_MASK;
 
         mask = range->mask & PAGE_MASK;
-        mask |= ~0ULL << boot_cpu_data.x86_phys_bits;
 
         /* This cannot overflow because writing to the reserved bits of
          * variable MTRRs causes a #GP.
@@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
         if (var_mtrr_range_is_valid(cur))
                 list_del(&mtrr_state->var_ranges[index].node);
 
+        /* Extend the mask with all 1 bits to the left, since those
+         * bits must implicitly be 0. The bits are then cleared
+         * when reading them.
+         */
         if (!is_mtrr_mask)
                 cur->base = data;
         else
-                cur->mask = data;
+                cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
 
         /* add it to the list if it's enabled. */
         if (var_mtrr_range_is_valid(cur)) {
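Together with the hunk above that drops the boot_cpu_data.x86_phys_bits padding, the stored mask is now padded with 1s above the guest's maxphyaddr instead of the host's physical address width, so var_mtrr_range() can compute the range end without overflow. A standalone sketch of the arithmetic for a hypothetical guest with a 36-bit physical address space (values invented for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const int maxphyaddr = 36;
        uint64_t base = 0x100000000ULL;  /* PHYSBASE: 4 GiB */
        uint64_t mask = 0xf00000000ULL;  /* PHYSMASK as written by the guest */

        /* Write side, as in set_var_mtrr_msr(): pad the stored mask with
         * 1s above maxphyaddr (the patch spells this -1LL << maxphyaddr). */
        mask |= ~0ULL << maxphyaddr;

        /* var_mtrr_range(): the range is [start, end).  ~mask is zero at
         * and above bit 36, so the +1 cannot overflow. */
        uint64_t start = base;           /* base & PAGE_MASK in the real code */
        uint64_t end = (start | ~mask) + 1;

        printf("[%#llx, %#llx)\n", (unsigned long long)start,
               (unsigned long long)end);  /* [0x100000000, 0x200000000) */
        return 0;
}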
@@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
                 else
                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
+
+                *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
         }
 
         return 0;
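The read side is the counterpart of that padding: bits at and above the guest's maxphyaddr are cleared again, so the guest reads back exactly the value it wrote. Continuing the hypothetical 36-bit example:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const int maxphyaddr = 36;
        uint64_t written = 0xf00000000ULL;

        /* stored with the implicit high bits set on write... */
        uint64_t stored = written | (~0ULL << maxphyaddr);

        /* ...and truncated on read, as in kvm_mtrr_get_msr() */
        uint64_t read_back = stored & ((1ULL << maxphyaddr) - 1);

        assert(read_back == written);
        return 0;
}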
@@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
         }
 
         if (iter.mtrr_disabled)
-                return mtrr_disabled_type();
+                return mtrr_disabled_type(vcpu);
 
         /* not contained in any MTRRs. */
         if (type == -1)