author    Avi Kivity <avi@redhat.com>  2012-09-12 07:03:28 -0400
committer Avi Kivity <avi@redhat.com>  2012-09-20 06:00:08 -0400
commit    3d34adec7081621ff51c195be045b87d75c0c49d (patch)
tree      0f2be432c532d25e8979b3853eb57af754d96f72 /arch
parent    edc2ae84eb40a3c062210fe01af1cae1633cc810 (diff)
KVM: MMU: Move gpte_access() out of paging_tmpl.h
We no longer rely on paging_tmpl.h defines, so we can move the function
to mmu.c.  Rely on zero extension to 64 bits to get the correct nx
behaviour.

Reviewed-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
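To illustrate the zero-extension point, here is a standalone sketch (not part
of the patch): once the gpte parameter is a u64, a 32-bit guest PTE is
zero-extended at the call, so bit 63 (PT64_NX_SHIFT) is always clear and the
NX masking is a harmless no-op for 32-bit entries, which is why the
"#if PTTYPE == 64" guard can be dropped.  The mask values below are stand-ins
mirroring the kernel's x86 PTE bit layout (writable = bit 1, user = bit 2,
ACC_EXEC_MASK = 1) and are assumptions of this sketch, not copied from the
patch.

/*
 * Standalone sketch: zero extension to 64 bits gives correct nx behaviour
 * without a PTTYPE == 64 guard.  Mask values are assumed stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define PT_WRITABLE_MASK	(1ULL << 1)
#define PT_USER_MASK		(1ULL << 2)
#define PT64_NX_SHIFT		63
#define ACC_EXEC_MASK		1U

static unsigned gpte_access(uint64_t gpte)
{
	unsigned access;

	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
	/* For a zero-extended 32-bit gpte, gpte >> 63 is 0, so this is a no-op. */
	access &= ~(gpte >> PT64_NX_SHIFT);
	return access;
}

int main(void)
{
	uint32_t gpte32 = 0x7;				/* present|writable|user; no NX bit exists */
	uint64_t gpte64 = 0x7ULL | (1ULL << 63);	/* same bits, NX set */

	/* prints 0x7: exec permission kept for the 32-bit entry */
	printf("32-bit gpte -> access %#x\n", gpte_access(gpte32));
	/* prints 0x6: exec permission cleared because NX was set */
	printf("64-bit gpte -> access %#x\n", gpte_access(gpte64));
	return 0;
}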
Diffstat (limited to 'arch')
 arch/x86/kvm/mmu.c         | 10 ++++++++++
 arch/x86/kvm/paging_tmpl.h | 21 +++++----------------
 2 files changed, 15 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 54c9cb4fdfa4..f297a2ccf4f6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3437,6 +3437,16 @@ static bool sync_mmio_spte(u64 *sptep, gfn_t gfn, unsigned access,
 	return false;
 }
 
+static inline unsigned gpte_access(struct kvm_vcpu *vcpu, u64 gpte)
+{
+	unsigned access;
+
+	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
+	access &= ~(gpte >> PT64_NX_SHIFT);
+
+	return access;
+}
+
 #define PTTYPE 64
 #include "paging_tmpl.h"
 #undef PTTYPE
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 064bcb32d84e..1cbf576852ca 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -101,17 +101,6 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	return (ret != orig_pte);
 }
 
-static unsigned FNAME(gpte_access)(struct kvm_vcpu *vcpu, pt_element_t gpte)
-{
-	unsigned access;
-
-	access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
-#if PTTYPE == 64
-	access &= ~(gpte >> PT64_NX_SHIFT);
-#endif
-	return access;
-}
-
 static bool FNAME(is_last_gpte)(struct guest_walker *walker,
 				struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 				pt_element_t gpte)
@@ -217,7 +206,7 @@ retry_walk:
 
 		last_gpte = FNAME(is_last_gpte)(walker, vcpu, mmu, pte);
 		if (last_gpte) {
-			pte_access = pt_access & FNAME(gpte_access)(vcpu, pte);
+			pte_access = pt_access & gpte_access(vcpu, pte);
 			/* check if the kernel is fetching from user page */
 			if (unlikely(pte_access & PT_USER_MASK) &&
 			    kvm_read_cr4_bits(vcpu, X86_CR4_SMEP))
@@ -268,7 +257,7 @@ retry_walk:
 			break;
 		}
 
-		pt_access &= FNAME(gpte_access)(vcpu, pte);
+		pt_access &= gpte_access(vcpu, pte);
 		--walker->level;
 	}
 
@@ -364,7 +353,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 		return;
 
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
-	pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+	pte_access = sp->role.access & gpte_access(vcpu, gpte);
 	protect_clean_gpte(&pte_access, gpte);
 	pfn = gfn_to_pfn_atomic(vcpu->kvm, gpte_to_gfn(gpte));
 	if (mmu_invalid_pfn(pfn))
@@ -438,7 +427,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte))
 			continue;
 
-		pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte);
+		pte_access = sp->role.access & gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 		gfn = gpte_to_gfn(gpte);
 		pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,
@@ -791,7 +780,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 
 		gfn = gpte_to_gfn(gpte);
 		pte_access = sp->role.access;
-		pte_access &= FNAME(gpte_access)(vcpu, gpte);
+		pte_access &= gpte_access(vcpu, gpte);
 		protect_clean_gpte(&pte_access, gpte);
 
 		if (sync_mmio_spte(&sp->spt[i], gfn, pte_access, &nr_present))