author		Sheng Yang <sheng@linux.intel.com>	2009-04-27 08:35:42 -0400
committer	Avi Kivity <avi@redhat.com>		2009-06-10 04:48:49 -0400
commit		4b12f0de33a64dfc624b2480f55b674f7fa23ef2 (patch)
tree		ed12e1230b777add7aae2b7d24fa82f8e4efbb34 /arch/x86/kvm
parent		9b62e5b10ff0f98346bcbe4a4fe3a0ca8fa7be30 (diff)
KVM: Replace get_mt_mask_shift with get_mt_mask
shadow_mt_mask is out of date: it is now only used as a flag to indicate
whether TDP is enabled. Get rid of it and use tdp_enabled instead.
Also move the memory type logic into kvm_x86_ops->get_mt_mask().
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
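
[Note: the interface change in a nutshell. The per-vendor hook no longer
returns just the bit position of the EPT/NPT memory-type field; it returns
the fully computed memory-type bits for a given guest frame. A minimal
before/after sketch of the kvm_x86_ops member, paraphrased from the diff
below — the actual declarations live in kvm_host.h, which is outside this
diffstat's 'arch/x86/kvm' limit:

	/* Before: vendor code exposed only the memory-type field shift;
	 * the common MMU assembled the bits itself in set_spte(),
	 * including the VMX-only VMX_EPT_IGMT_BIT. */
	int (*get_mt_mask_shift)(void);

	/* After: vendor code returns the complete memory-type mask for
	 * one gfn. VMX derives the type from the guest MTRRs via
	 * kvm_get_guest_memory_type() and ORs in VMX_EPT_IGMT_BIT for
	 * non-MMIO frames; SVM simply returns 0. set_spte() ORs the
	 * result into the SPTE whenever tdp_enabled is set. */
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);]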
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--	arch/x86/kvm/mmu.c	21
-rw-r--r--	arch/x86/kvm/svm.c	4
-rw-r--r--	arch/x86/kvm/vmx.c	17
-rw-r--r--	arch/x86/kvm/x86.c	2
4 files changed, 21 insertions, 23 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3592aea59ef7..bc614f91f5ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -178,7 +178,6 @@ static u64 __read_mostly shadow_x_mask;	/* mutual exclusive with nx_mask */
 static u64 __read_mostly shadow_user_mask;
 static u64 __read_mostly shadow_accessed_mask;
 static u64 __read_mostly shadow_dirty_mask;
-static u64 __read_mostly shadow_mt_mask;
 
 static inline u64 rsvd_bits(int s, int e)
 {
@@ -199,14 +198,13 @@ void kvm_mmu_set_base_ptes(u64 base_pte)
 EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
 
 void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-		u64 dirty_mask, u64 nx_mask, u64 x_mask, u64 mt_mask)
+		u64 dirty_mask, u64 nx_mask, u64 x_mask)
 {
 	shadow_user_mask = user_mask;
 	shadow_accessed_mask = accessed_mask;
 	shadow_dirty_mask = dirty_mask;
 	shadow_nx_mask = nx_mask;
 	shadow_x_mask = x_mask;
-	shadow_mt_mask = mt_mask;
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
@@ -1608,7 +1606,7 @@ static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
 	return mtrr_state->def_type;
 }
 
-static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
+u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	u8 mtrr;
 
@@ -1618,6 +1616,7 @@ static u8 get_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 		mtrr = MTRR_TYPE_WRBACK;
 	return mtrr;
 }
+EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
 
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -1670,7 +1669,6 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 {
 	u64 spte;
 	int ret = 0;
-	u64 mt_mask = shadow_mt_mask;
 
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
@@ -1690,16 +1688,9 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		spte |= shadow_user_mask;
 	if (largepage)
 		spte |= PT_PAGE_SIZE_MASK;
-	if (mt_mask) {
-		if (!kvm_is_mmio_pfn(pfn)) {
-			mt_mask = get_memory_type(vcpu, gfn) <<
-				  kvm_x86_ops->get_mt_mask_shift();
-			mt_mask |= VMX_EPT_IGMT_BIT;
-		} else
-			mt_mask = MTRR_TYPE_UNCACHABLE <<
-				  kvm_x86_ops->get_mt_mask_shift();
-		spte |= mt_mask;
-	}
+	if (tdp_enabled)
+		spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
+			kvm_is_mmio_pfn(pfn));
 
 	spte |= (u64)pfn << PAGE_SHIFT;
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d96a6d3edec7..63503782935d 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2589,7 +2589,7 @@ static int get_npt_level(void)
 #endif
 }
 
-static int svm_get_mt_mask_shift(void)
+static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
 }
@@ -2652,7 +2652,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
-	.get_mt_mask_shift = svm_get_mt_mask_shift,
+	.get_mt_mask = svm_get_mt_mask,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 25be53aa5eef..59b080c262e8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3577,9 +3577,17 @@ static int get_ept_level(void)
 	return VMX_EPT_DEFAULT_GAW + 1;
 }
 
-static int vmx_get_mt_mask_shift(void)
+static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
-	return VMX_EPT_MT_EPTE_SHIFT;
+	u64 ret;
+
+	if (is_mmio)
+		ret = MTRR_TYPE_UNCACHABLE << VMX_EPT_MT_EPTE_SHIFT;
+	else
+		ret = (kvm_get_guest_memory_type(vcpu, gfn) <<
+			VMX_EPT_MT_EPTE_SHIFT) | VMX_EPT_IGMT_BIT;
+
+	return ret;
 }
 
 static struct kvm_x86_ops vmx_x86_ops = {
@@ -3639,7 +3647,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
-	.get_mt_mask_shift = vmx_get_mt_mask_shift,
+	.get_mt_mask = vmx_get_mt_mask,
 };
 
 static int __init vmx_init(void)
@@ -3698,8 +3706,7 @@ static int __init vmx_init(void)
 		kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 			VMX_EPT_WRITABLE_MASK);
 		kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
-			VMX_EPT_EXECUTABLE_MASK,
-			VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+			VMX_EPT_EXECUTABLE_MASK);
 		kvm_enable_tdp();
 	} else
 		kvm_disable_tdp();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e2713716e732..dd056826f675 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2772,7 +2772,7 @@ int kvm_arch_init(void *opaque)
 	kvm_mmu_set_nonpresent_ptes(0ull, 0ull);
 	kvm_mmu_set_base_ptes(PT_PRESENT_MASK);
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
-		PT_DIRTY_MASK, PT64_NX_MASK, 0, 0);
+		PT_DIRTY_MASK, PT64_NX_MASK, 0);
 
 	for_each_possible_cpu(cpu)
 		per_cpu(cpu_tsc_khz, cpu) = tsc_khz;