author		Avi Kivity <avi@redhat.com>	2009-06-10 07:24:23 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:32:51 -0400
commit		d555c333aa544b222fe077adcd5dfea024b2c913 (patch)
tree		778cc7309b831690c4ec77741288dce3cf393aa8	/arch/x86/kvm/mmu.c
parent		43a3795a3a12425de31e25ce0ebc3bb41501cef7 (diff)
KVM: MMU: s/shadow_pte/spte/
We use shadow_pte and spte inconsistently; switch to the shorter spelling.

Rename set_shadow_pte() to __set_spte() to avoid a conflict with the
existing set_spte(), and to indicate its low-level nature.

Signed-off-by: Avi Kivity <avi@redhat.com>
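For orientation before the diff, here is a minimal sketch of the naming split this patch establishes: __set_spte() is the raw, low-level store into a shadow PTE, while the existing set_spte() remains the higher-level helper that computes the new value before writing it. The 32-bit #else branch below is an assumption added for illustration; only the CONFIG_X86_64 path is visible in the hunks that follow.

/*
 * Sketch of the renamed low-level setter.  The double-underscore prefix
 * marks it as the raw write that callers such as set_spte() and
 * rmap_write_protect() wrap.  The #else branch is assumed, not shown in
 * this diff.
 */
static void __set_spte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

The double-underscore convention also resolves the name clash the commit message mentions, since set_spte() already exists as the permission-computing helper.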
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	102
1 file changed, 51 insertions(+), 51 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a039e6bc21f7..d443a421ca3e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -143,7 +143,7 @@ module_param(oos_shadow, bool, 0644);
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
-	u64 *shadow_ptes[RMAP_EXT];
+	u64 *sptes[RMAP_EXT];
 	struct kvm_rmap_desc *more;
 };
 
@@ -262,7 +262,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
-static void set_shadow_pte(u64 *sptep, u64 spte)
+static void __set_spte(u64 *sptep, u64 spte)
 {
 #ifdef CONFIG_X86_64
 	set_64bit((unsigned long *)sptep, spte);
@@ -514,23 +514,23 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	} else if (!(*rmapp & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)*rmapp;
-		desc->shadow_ptes[1] = spte;
+		desc->sptes[0] = (u64 *)*rmapp;
+		desc->sptes[1] = spte;
 		*rmapp = (unsigned long)desc | 1;
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
+		while (desc->sptes[RMAP_EXT-1] && desc->more) {
 			desc = desc->more;
 			count += RMAP_EXT;
 		}
-		if (desc->shadow_ptes[RMAP_EXT-1]) {
+		if (desc->sptes[RMAP_EXT-1]) {
 			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
 		}
-		for (i = 0; desc->shadow_ptes[i]; ++i)
+		for (i = 0; desc->sptes[i]; ++i)
 			;
-		desc->shadow_ptes[i] = spte;
+		desc->sptes[i] = spte;
 	}
 	return count;
 }
@@ -542,14 +542,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
 {
 	int j;
 
-	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
 		;
-	desc->shadow_ptes[i] = desc->shadow_ptes[j];
-	desc->shadow_ptes[j] = NULL;
+	desc->sptes[i] = desc->sptes[j];
+	desc->sptes[j] = NULL;
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*rmapp = (unsigned long)desc->shadow_ptes[0];
+		*rmapp = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
@@ -594,8 +594,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 		prev_desc = NULL;
 		while (desc) {
-			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
-				if (desc->shadow_ptes[i] == spte) {
+			for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+				if (desc->sptes[i] == spte) {
 					rmap_desc_remove_entry(rmapp,
 							       desc, i,
 							       prev_desc);
@@ -626,10 +626,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 	prev_desc = NULL;
 	prev_spte = NULL;
 	while (desc) {
-		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
 			if (prev_spte == spte)
-				return desc->shadow_ptes[i];
-			prev_spte = desc->shadow_ptes[i];
+				return desc->sptes[i];
+			prev_spte = desc->sptes[i];
 		}
 		desc = desc->more;
 	}
@@ -651,7 +651,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 		if (is_writeble_pte(*spte)) {
-			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
@@ -675,7 +675,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		if (is_writeble_pte(*spte)) {
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
-			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			__set_spte(spte, shadow_trap_nonpresent_pte);
 			spte = NULL;
 			write_protected = 1;
 		}
@@ -694,7 +694,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
 		rmap_remove(kvm, spte);
-		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+		__set_spte(spte, shadow_trap_nonpresent_pte);
 		need_tlb_flush = 1;
 	}
 	return need_tlb_flush;
@@ -1369,7 +1369,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 		}
 		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(sp, parent_pte);
-		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
+		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
 	}
 }
 
@@ -1517,7 +1517,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 		if (pt[i] == shadow_notrap_nonpresent_pte)
-			set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
 	}
 }
 
@@ -1683,7 +1683,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -1733,7 +1733,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writeble_pte(*shadow_pte))
+		if (!can_unsync && is_writeble_pte(*sptep))
 			goto set_pte;
 
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1750,62 +1750,62 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	set_shadow_pte(shadow_pte, spte);
+	__set_spte(sptep, spte);
 	return ret;
 }
 
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int largepage, gfn_t gfn,
 			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*shadow_pte);
+	int was_writeble = is_writeble_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
-		 __func__, *shadow_pte, pt_access,
+		 __func__, *sptep, pt_access,
 		 write_fault, user_fault, gfn);
 
-	if (is_rmap_spte(*shadow_pte)) {
+	if (is_rmap_spte(*sptep)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
 		 */
-		if (largepage && !is_large_pte(*shadow_pte)) {
+		if (largepage && !is_large_pte(*sptep)) {
 			struct kvm_mmu_page *child;
-			u64 pte = *shadow_pte;
+			u64 pte = *sptep;
 
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(child, shadow_pte);
-		} else if (pfn != spte_to_pfn(*shadow_pte)) {
+			mmu_page_remove_parent_pte(child, sptep);
+		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
-				 spte_to_pfn(*shadow_pte), pfn);
-			rmap_remove(vcpu->kvm, shadow_pte);
+				 spte_to_pfn(*sptep), pfn);
+			rmap_remove(vcpu->kvm, sptep);
 		} else
 			was_rmapped = 1;
 	}
-	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		     dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
 	}
 
-	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
-		 is_large_pte(*shadow_pte)? "2MB" : "4kB",
-		 is_present_pte(*shadow_pte)?"RW":"R", gfn,
-		 *shadow_pte, shadow_pte);
-	if (!was_rmapped && is_large_pte(*shadow_pte))
+		 is_large_pte(*sptep)? "2MB" : "4kB",
+		 is_present_pte(*sptep)?"RW":"R", gfn,
+		 *shadow_pte, sptep);
+	if (!was_rmapped && is_large_pte(*sptep))
 		++vcpu->kvm->stat.lpages;
 
-	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
-		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_spte(*shadow_pte))
+		rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
+		if (!is_rmap_spte(*sptep))
 			kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
@@ -1816,7 +1816,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 			kvm_release_pfn_clean(pfn);
 	}
 	if (speculative) {
-		vcpu->arch.last_pte_updated = shadow_pte;
+		vcpu->arch.last_pte_updated = sptep;
 		vcpu->arch.last_pte_gfn = gfn;
 	}
 }
@@ -1854,10 +1854,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 				return -ENOMEM;
 			}
 
-			set_shadow_pte(iterator.sptep,
-				       __pa(sp->spt)
-				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				       | shadow_user_mask | shadow_x_mask);
+			__set_spte(iterator.sptep,
+				   __pa(sp->spt)
+				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				   | shadow_user_mask | shadow_x_mask);
 		}
 	}
 	return pt_write;
@@ -2389,7 +2389,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
-	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+	__set_spte(spte, shadow_trap_nonpresent_pte);
 	if (is_large_pte(pte))
 		--vcpu->kvm->stat.lpages;
 }
@@ -3125,7 +3125,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 			while (d) {
 				for (k = 0; k < RMAP_EXT; ++k)
-					if (d->shadow_ptes[k])
+					if (d->sptes[k])
 						++nmaps;
 					else
 						break;