diff options
-rw-r--r-- | arch/x86/kvm/mmu.c | 8 | ||||
-rw-r--r-- | arch/x86/kvm/paging_tmpl.h | 10 |
2 files changed, 9 insertions, 9 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 29b2ec46bf1e..59104927c582 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -1958,7 +1958,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
1958 | unsigned pte_access, int user_fault, | 1958 | unsigned pte_access, int user_fault, |
1959 | int write_fault, int dirty, int level, | 1959 | int write_fault, int dirty, int level, |
1960 | gfn_t gfn, pfn_t pfn, bool speculative, | 1960 | gfn_t gfn, pfn_t pfn, bool speculative, |
1961 | bool can_unsync, bool reset_host_protection) | 1961 | bool can_unsync, bool host_writable) |
1962 | { | 1962 | { |
1963 | u64 spte, entry = *sptep; | 1963 | u64 spte, entry = *sptep; |
1964 | int ret = 0; | 1964 | int ret = 0; |
@@ -1985,7 +1985,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
1985 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, | 1985 | spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, |
1986 | kvm_is_mmio_pfn(pfn)); | 1986 | kvm_is_mmio_pfn(pfn)); |
1987 | 1987 | ||
1988 | if (reset_host_protection) | 1988 | if (host_writable) |
1989 | spte |= SPTE_HOST_WRITEABLE; | 1989 | spte |= SPTE_HOST_WRITEABLE; |
1990 | 1990 | ||
1991 | spte |= (u64)pfn << PAGE_SHIFT; | 1991 | spte |= (u64)pfn << PAGE_SHIFT; |
@@ -2048,7 +2048,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
2048 | int user_fault, int write_fault, int dirty, | 2048 | int user_fault, int write_fault, int dirty, |
2049 | int *ptwrite, int level, gfn_t gfn, | 2049 | int *ptwrite, int level, gfn_t gfn, |
2050 | pfn_t pfn, bool speculative, | 2050 | pfn_t pfn, bool speculative, |
2051 | bool reset_host_protection) | 2051 | bool host_writable) |
2052 | { | 2052 | { |
2053 | int was_rmapped = 0; | 2053 | int was_rmapped = 0; |
2054 | int rmap_count; | 2054 | int rmap_count; |
@@ -2083,7 +2083,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, | |||
2083 | 2083 | ||
2084 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, | 2084 | if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault, |
2085 | dirty, level, gfn, pfn, speculative, true, | 2085 | dirty, level, gfn, pfn, speculative, true, |
2086 | reset_host_protection)) { | 2086 | host_writable)) { |
2087 | if (write_fault) | 2087 | if (write_fault) |
2088 | *ptwrite = 1; | 2088 | *ptwrite = 1; |
2089 | kvm_mmu_flush_tlb(vcpu); | 2089 | kvm_mmu_flush_tlb(vcpu); |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index ca0e5e834724..57619ed4beee 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -329,7 +329,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
329 | return; | 329 | return; |
330 | kvm_get_pfn(pfn); | 330 | kvm_get_pfn(pfn); |
331 | /* | 331 | /* |
332 | * we call mmu_set_spte() with reset_host_protection = true because that | 332 | * we call mmu_set_spte() with host_writable = true because that |
333 | * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). | 333 | * vcpu->arch.update_pte.pfn was fetched from get_user_pages(write = 1). |
334 | */ | 334 | */ |
335 | mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, | 335 | mmu_set_spte(vcpu, spte, sp->role.access, pte_access, 0, 0, |
@@ -744,7 +744,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
744 | bool clear_unsync) | 744 | bool clear_unsync) |
745 | { | 745 | { |
746 | int i, offset, nr_present; | 746 | int i, offset, nr_present; |
747 | bool reset_host_protection; | 747 | bool host_writable; |
748 | gpa_t first_pte_gpa; | 748 | gpa_t first_pte_gpa; |
749 | 749 | ||
750 | offset = nr_present = 0; | 750 | offset = nr_present = 0; |
@@ -794,14 +794,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, | |||
794 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); | 794 | pte_access = sp->role.access & FNAME(gpte_access)(vcpu, gpte); |
795 | if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { | 795 | if (!(sp->spt[i] & SPTE_HOST_WRITEABLE)) { |
796 | pte_access &= ~ACC_WRITE_MASK; | 796 | pte_access &= ~ACC_WRITE_MASK; |
797 | reset_host_protection = 0; | 797 | host_writable = 0; |
798 | } else { | 798 | } else { |
799 | reset_host_protection = 1; | 799 | host_writable = 1; |
800 | } | 800 | } |
801 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, | 801 | set_spte(vcpu, &sp->spt[i], pte_access, 0, 0, |
802 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, | 802 | is_dirty_gpte(gpte), PT_PAGE_TABLE_LEVEL, gfn, |
803 | spte_to_pfn(sp->spt[i]), true, false, | 803 | spte_to_pfn(sp->spt[i]), true, false, |
804 | reset_host_protection); | 804 | host_writable); |
805 | } | 805 | } |
806 | 806 | ||
807 | return !nr_present; | 807 | return !nr_present; |