author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>    2010-01-18 04:45:10 -0500
committer Marcelo Tosatti <mtosatti@redhat.com>                2010-03-01 10:36:00 -0500
commit    8dae444529230301bc85fc86033aa06a734c1a29 (patch)
tree      90fe862877814f5815b5623d6e93b9568b0725ef
parent    c25bc1638a1211f57cccbabdd8b732813b852340 (diff)
KVM: rename is_writeble_pte() to is_writable_pte()
There are two spellings of "writable" in arch/x86/kvm/mmu.c and
paging_tmpl.h.

This patch renames is_writeble_pte() to is_writable_pte() and makes
grepping easy. The new name is consistent with the function's own
definition:

	return pte & PT_WRITABLE_MASK;

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
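For reference, a minimal sketch of the renamed helper as it reads in
arch/x86/kvm/mmu.c after this patch (taken from the first hunk below;
only the name changes, the body is untouched):

	static int is_writable_pte(unsigned long pte)
	{
		return pte & PT_WRITABLE_MASK;
	}

With a single spelling in the tree, one plain grep now finds every user,
for example:

	git grep -n 'is_writable_pte' arch/x86/kvm/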
-rw-r--r--  arch/x86/kvm/mmu.c          | 18
-rw-r--r--  arch/x86/kvm/paging_tmpl.h  |  2
2 files changed, 10 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 276bf7497c36..ff2b2e8d72eb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -250,7 +250,7 @@ static int is_large_pte(u64 pte)
 	return pte & PT_PAGE_SIZE_MASK;
 }
 
-static int is_writeble_pte(unsigned long pte)
+static int is_writable_pte(unsigned long pte)
 {
 	return pte & PT_WRITABLE_MASK;
 }
@@ -632,7 +632,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	pfn = spte_to_pfn(*spte);
 	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
-	if (is_writeble_pte(*spte))
+	if (is_writable_pte(*spte))
 		kvm_set_pfn_dirty(pfn);
 	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
 	if (!*rmapp) {
@@ -708,7 +708,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!spte);
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
-		if (is_writeble_pte(*spte)) {
+		if (is_writable_pte(*spte)) {
 			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
@@ -732,7 +732,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
 		pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-		if (is_writeble_pte(*spte)) {
+		if (is_writable_pte(*spte)) {
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
 			__set_spte(spte, shadow_trap_nonpresent_pte);
@@ -787,7 +787,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
 
 			new_spte &= ~PT_WRITABLE_MASK;
 			new_spte &= ~SPTE_HOST_WRITEABLE;
-			if (is_writeble_pte(*spte))
+			if (is_writable_pte(*spte))
 				kvm_set_pfn_dirty(spte_to_pfn(*spte));
 			__set_spte(spte, new_spte);
 			spte = rmap_next(kvm, rmapp, spte);
@@ -1847,7 +1847,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writeble_pte(*sptep))
+		if (!can_unsync && is_writable_pte(*sptep))
 			goto set_pte;
 
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1855,7 +1855,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 				 __func__, gfn);
 			ret = 1;
 			pte_access &= ~ACC_WRITE_MASK;
-			if (is_writeble_pte(spte))
+			if (is_writable_pte(spte))
 				spte &= ~PT_WRITABLE_MASK;
 		}
 	}
@@ -1876,7 +1876,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 bool reset_host_protection)
 {
 	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*sptep);
+	int was_writable = is_writable_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
@@ -1927,7 +1927,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
 	} else {
-		if (was_writeble)
+		if (was_writable)
 			kvm_release_pfn_dirty(pfn);
 		else
 			kvm_release_pfn_clean(pfn);
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index ede2131a9225..df15a5307d2d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -162,7 +162,7 @@ walk:
 		if (rsvd_fault)
 			goto access_error;
 
-		if (write_fault && !is_writeble_pte(pte))
+		if (write_fault && !is_writable_pte(pte))
 			if (user_fault || is_write_protection(vcpu))
 				goto access_error;
 