author    Sheng Yang <sheng@linux.intel.com>    2009-04-27 08:35:42 -0400
committer Avi Kivity <avi@redhat.com>           2009-06-10 04:48:49 -0400
commit    4b12f0de33a64dfc624b2480f55b674f7fa23ef2 (patch)
tree      ed12e1230b777add7aae2b7d24fa82f8e4efbb34 /arch
parent    9b62e5b10ff0f98346bcbe4a4fe3a0ca8fa7be30 (diff)
KVM: Replace get_mt_mask_shift with get_mt_mask
shadow_mt_mask is out of date; it is now used only as a flag to indicate whether TDP is enabled. Get rid of it and use tdp_enabled instead. Also move the memory type logic into kvm_x86_ops->get_mt_mask().

Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
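For readers unfamiliar with the EPT memory-type bits, here is a minimal, standalone sketch (not kernel code) of the contract the new ->get_mt_mask() hook implements for VMX below: MMIO frames get an uncacheable type, RAM frames get the guest's MTRR-derived type plus the ignore-guest-PAT bit. The constant values and the guest_memory_type() helper are assumptions standing in for the kernel's VMX_EPT_* and MTRR_TYPE_* macros and kvm_get_guest_memory_type().

/* Sketch of the ->get_mt_mask() semantics, with stand-in constants. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VMX_EPT_MT_EPTE_SHIFT   3           /* assumed: EPT memory-type field at bits 5:3 */
#define VMX_EPT_IGMT_BIT        (1ull << 6) /* assumed: "ignore guest PAT" bit */
#define MTRR_TYPE_UNCACHABLE    0           /* UC */
#define MTRR_TYPE_WRBACK        6           /* WB */

/* Hypothetical stand-in for kvm_get_guest_memory_type(): pretend the
 * guest MTRRs report write-back for every frame. */
static uint8_t guest_memory_type(uint64_t gfn)
{
	(void)gfn;
	return MTRR_TYPE_WRBACK;
}

/* Mirrors the shape of vmx_get_mt_mask(): the vendor module now returns
 * the complete memory-type contribution for the SPTE, not just a shift. */
static uint64_t get_mt_mask(uint64_t gfn, bool is_mmio)
{
	if (is_mmio)
		return (uint64_t)MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
	return ((uint64_t)guest_memory_type(gfn) << VMX_EPT_MT_EPTE_SHIFT) |
	       VMX_EPT_IGMT_BIT;
}

int main(void)
{
	printf("RAM  gfn 0x100: mt bits = 0x%llx\n",
	       (unsigned long long)get_mt_mask(0x100, false));
	printf("MMIO gfn 0x200: mt bits = 0x%llx\n",
	       (unsigned long long)get_mt_mask(0x200, true));
	return 0;
}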
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  5
-rw-r--r--  arch/x86/kvm/mmu.c              | 21
-rw-r--r--  arch/x86/kvm/svm.c              |  4
-rw-r--r--  arch/x86/kvm/vmx.c              | 17
-rw-r--r--  arch/x86/kvm/x86.c              |  2
5 files changed, 24 insertions, 25 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 3e94d0513208..8a6f6b643dfe 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -522,7 +522,7 @@ struct kvm_x86_ops {
 	void (*drop_interrupt_shadow)(struct kvm_vcpu *vcpu);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
-	int (*get_mt_mask_shift)(void);
+	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
@@ -536,7 +536,7 @@ int kvm_mmu_setup(struct kvm_vcpu *vcpu);
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte);
 void kvm_mmu_set_base_ptes(u64 base_pte);
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask);
+		u64 dirty_mask, u64 nx_mask, u64 x_mask);
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
@@ -550,6 +550,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
 			  const void *val, int bytes);
 int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 		  gpa_t addr, unsigned long *ret);
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3592aea59ef7..bc614f91f5ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -178,7 +178,6 @@ static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
 
 static inline u64 rsvd_bits(int s, int e)
 {
@@ -199,14 +198,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
-	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1608,7 +1606,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
 	return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	u8 mtrr;
 
@@ -1618,6 +1616,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 		mtrr = MTRR_TYPE_WRBACK;
 	return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1670,7 +1669,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
-	u64 mt_mask = shadow_mt_mask;
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1690,16 +1688,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
-	if (mt_mask) {
-		if (!kvm_is_mmio_pfn(pfn)) {
-			mt_mask = get_memory_type(vcpu, gfn) <<
-				  kvm_x86_ops->get_mt_mask_shift();
-			mt_mask |= VMX_EPT_IGMT_BIT;
-		} else
-			mt_mask = MTRR_TYPE_UNCACHABLE <<
-				  kvm_x86_ops->get_mt_mask_shift();
-		spte |= mt_mask;
-	}
+	if (tdp_enabled)
+		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+			kvm_is_mmio_pfn(pfn));
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d96a6d3edec7..63503782935d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2589,7 +2589,7 @@ static int get_npt_level(void)
 #endif
 }
 
-static int svm_get_mt_mask_shift(void)
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
 }
@@ -2652,7 +2652,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
-	.get_mt_mask_shift = svm_get_mt_mask_shift,
+	.get_mt_mask = svm_get_mt_mask,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25be53aa5eef..59b080c262e8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3577,9 +3577,17 @@ static int get_ept_level(void)
 	return VMX_EPT_DEFAULT_GAW + 1;
 }
 
-static int vmx_get_mt_mask_shift(void)
+static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
-	return VMX_EPT_MT_EPTE_SHIFT;
+	u64 ret;
+
+	if (is_mmio)
+		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+	else
+		ret = (kvm_get_guest_memory_type(vcpu, gfn) <<
+			VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IGMT_BIT;
+
+	return ret;
 }
 
3585static struct kvm_x86_ops vmx_x86_ops = { 3593static struct kvm_x86_ops vmx_x86_ops = {
@@ -3639,7 +3647,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
-	.get_mt_mask_shift = vmx_get_mt_mask_shift,
+	.get_mt_mask = vmx_get_mt_mask,
 };
 
 static int __init vmx_init(void)
@@ -3698,8 +3706,7 @@ static int __init vmx_init(void)
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-			VMX_EPT_EXECUTABLE_MASK,
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
 	} else
 		kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e2713716e732..dd056826f675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2772,7 +2772,7 @@ int kvm_arch_init(void *opaque)
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-			PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
+			PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
 	for_each_possible_cpu(cpu)
 		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;