author		Avi Kivity <avi@redhat.com>	2009-06-10 07:24:23 -0400
committer	Avi Kivity <avi@redhat.com>	2009-09-10 01:32:51 -0400
commit		d555c333aa544b222fe077adcd5dfea024b2c913 (patch)
tree		778cc7309b831690c4ec77741288dce3cf393aa8
parent		43a3795a3a12425de31e25ce0ebc3bb41501cef7 (diff)
KVM: MMU: s/shadow_pte/spte/
We use shadow_pte and spte inconsistently; switch to the shorter spelling.

Rename set_shadow_pte() to __set_spte() to avoid a conflict with the
existing set_spte(), and to indicate its low-level nature.
Signed-off-by: Avi Kivity <avi@redhat.com>
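
The rename makes the layering easier to see at call sites throughout the diff below: set_spte() decides what a shadow PTE should contain, while __set_spte() merely stores the chosen value. A minimal userspace sketch of that split, with placeholder masks and a plain store standing in for the kernel helper (the real helper writes the 64-bit value atomically; see the note after the __set_spte() hunk below):

	/* Sketch only: illustrates the set_spte()/__set_spte() layering, not KVM code. */
	#include <stdint.h>
	#include <stdio.h>

	#define PTE_PRESENT	(1ull << 0)	/* placeholder, not PT_PRESENT_MASK */
	#define PTE_WRITABLE	(1ull << 1)	/* placeholder, not PT_WRITABLE_MASK */

	/* Low level: just store the entry; no policy decisions are made here. */
	static void __set_spte(uint64_t *sptep, uint64_t spte)
	{
		*sptep = spte;
	}

	/* Higher level: compute the entry from higher-level state, then store it. */
	static void set_spte(uint64_t *sptep, uint64_t pfn, int writable)
	{
		uint64_t spte = (pfn << 12) | PTE_PRESENT;

		if (writable)
			spte |= PTE_WRITABLE;
		__set_spte(sptep, spte);
	}

	int main(void)
	{
		uint64_t spte = 0;

		set_spte(&spte, 0x1234, 1);
		printf("spte = %#llx\n", (unsigned long long)spte);
		return 0;
	}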
-rw-r--r--	arch/x86/kvm/mmu.c		102
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	 16
2 files changed, 59 insertions, 59 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a039e6bc21f7..d443a421ca3e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -143,7 +143,7 @@ module_param(oos_shadow, bool, 0644);
 #define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
 
 struct kvm_rmap_desc {
-	u64 *shadow_ptes[RMAP_EXT];
+	u64 *sptes[RMAP_EXT];
 	struct kvm_rmap_desc *more;
 };
 
@@ -262,7 +262,7 @@ static gfn_t pse36_gfn_delta(u32 gpte)
 	return (gpte & PT32_DIR_PSE36_MASK) << shift;
 }
 
-static void set_shadow_pte(u64 *sptep, u64 spte)
+static void __set_spte(u64 *sptep, u64 spte)
 {
 #ifdef CONFIG_X86_64
 	set_64bit((unsigned long *)sptep, spte);
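
The set_64bit() call above is the reason a dedicated low-level helper exists at all: the whole 64-bit entry is written in a single shot, so a concurrent lookup (or the hardware page walker) never observes a half-updated entry on a 32-bit host. This is an inference from the code shown, not something the commit message states; a hedged userspace analogue, using C11 atomics in place of set_64bit():

	/* Sketch only: a portable stand-in for the atomic 64-bit store shown above. */
	#include <stdatomic.h>
	#include <stdint.h>

	static void example_set_spte(_Atomic uint64_t *sptep, uint64_t spte)
	{
		/*
		 * A plain "*sptep = spte" could compile to two 32-bit stores on a
		 * 32-bit build; an atomic store keeps the update indivisible,
		 * which is the guarantee set_64bit() gives in the kernel.
		 */
		atomic_store_explicit(sptep, spte, memory_order_relaxed);
	}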
@@ -514,23 +514,23 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
 	} else if (!(*rmapp & 1)) {
 		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
 		desc = mmu_alloc_rmap_desc(vcpu);
-		desc->shadow_ptes[0] = (u64 *)*rmapp;
-		desc->shadow_ptes[1] = spte;
+		desc->sptes[0] = (u64 *)*rmapp;
+		desc->sptes[1] = spte;
 		*rmapp = (unsigned long)desc | 1;
 	} else {
 		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
 		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
-		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more) {
+		while (desc->sptes[RMAP_EXT-1] && desc->more) {
 			desc = desc->more;
 			count += RMAP_EXT;
 		}
-		if (desc->shadow_ptes[RMAP_EXT-1]) {
+		if (desc->sptes[RMAP_EXT-1]) {
 			desc->more = mmu_alloc_rmap_desc(vcpu);
 			desc = desc->more;
 		}
-		for (i = 0; desc->shadow_ptes[i]; ++i)
+		for (i = 0; desc->sptes[i]; ++i)
 			;
-		desc->shadow_ptes[i] = spte;
+		desc->sptes[i] = spte;
 	}
 	return count;
 }
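
This hunk is the heaviest user of the renamed sptes[] array, so it is worth spelling out the encoding it manipulates: *rmapp is a tagged word that holds either a single spte pointer (low bit clear) or, with the low bit set, a pointer to a chain of kvm_rmap_desc blocks. A simplified userspace sketch that mirrors the logic above (allocation helpers, locking and error handling are omitted; the RMAP_EXT value is assumed here purely for illustration):

	/* Sketch only: the rmap tagged-pointer encoding, not the kernel code. */
	#include <stdint.h>
	#include <stdlib.h>

	#define RMAP_EXT 4	/* slots per descriptor; value assumed for the sketch */

	struct kvm_rmap_desc {
		uint64_t *sptes[RMAP_EXT];
		struct kvm_rmap_desc *more;
	};

	static void rmap_add(unsigned long *rmapp, uint64_t *spte)
	{
		struct kvm_rmap_desc *desc;
		int i;

		if (!*rmapp) {				/* 0 -> 1: store the bare pointer */
			*rmapp = (unsigned long)spte;
		} else if (!(*rmapp & 1)) {		/* 1 -> many: promote to a descriptor */
			desc = calloc(1, sizeof(*desc));
			desc->sptes[0] = (uint64_t *)*rmapp;
			desc->sptes[1] = spte;
			*rmapp = (unsigned long)desc | 1;
		} else {				/* many -> many: append, chaining as needed */
			desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (desc->sptes[RMAP_EXT - 1] && desc->more)
				desc = desc->more;
			if (desc->sptes[RMAP_EXT - 1]) {
				desc->more = calloc(1, sizeof(*desc));
				desc = desc->more;
			}
			for (i = 0; desc->sptes[i]; ++i)
				;
			desc->sptes[i] = spte;
		}
	}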
@@ -542,14 +542,14 @@ static void rmap_desc_remove_entry(unsigned long *rmapp,
 {
 	int j;
 
-	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
+	for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
 		;
-	desc->shadow_ptes[i] = desc->shadow_ptes[j];
-	desc->shadow_ptes[j] = NULL;
+	desc->sptes[i] = desc->sptes[j];
+	desc->sptes[j] = NULL;
 	if (j != 0)
 		return;
 	if (!prev_desc && !desc->more)
-		*rmapp = (unsigned long)desc->shadow_ptes[0];
+		*rmapp = (unsigned long)desc->sptes[0];
 	else
 		if (prev_desc)
 			prev_desc->more = desc->more;
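
rmap_desc_remove_entry() keeps each sptes[] array densely packed: it finds the last occupied slot, moves that entry into the freed slot, and clears the old position, so later scans can stop at the first NULL. The same backfill step in isolation (sketch only; freeing an emptied descriptor and demoting *rmapp back to a bare pointer are left out):

	/* Sketch only: the compaction step used by the removal path above. */
	#include <stdint.h>
	#include <stddef.h>

	#define RMAP_EXT 4	/* assumed value, as in the sketch above */

	static void remove_entry(uint64_t *sptes[RMAP_EXT], int i)
	{
		int j;

		for (j = RMAP_EXT - 1; !sptes[j] && j > i; --j)
			;
		sptes[i] = sptes[j];	/* backfill the hole with the last entry */
		sptes[j] = NULL;	/* the array stays densely packed */
	}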
@@ -594,8 +594,8 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 	prev_desc = NULL;
 	while (desc) {
-		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
-			if (desc->shadow_ptes[i] == spte) {
+		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
+			if (desc->sptes[i] == spte) {
 				rmap_desc_remove_entry(rmapp,
 						       desc, i,
 						       prev_desc);
@@ -626,10 +626,10 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 	prev_desc = NULL;
 	prev_spte = NULL;
 	while (desc) {
-		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
+		for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
 			if (prev_spte == spte)
-				return desc->shadow_ptes[i];
-			prev_spte = desc->shadow_ptes[i];
+				return desc->sptes[i];
+			prev_spte = desc->sptes[i];
 		}
 		desc = desc->more;
 	}
@@ -651,7 +651,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
 		if (is_writeble_pte(*spte)) {
-			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
+			__set_spte(spte, *spte & ~PT_WRITABLE_MASK);
 			write_protected = 1;
 		}
 		spte = rmap_next(kvm, rmapp, spte);
@@ -675,7 +675,7 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 		if (is_writeble_pte(*spte)) {
 			rmap_remove(kvm, spte);
 			--kvm->stat.lpages;
-			set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+			__set_spte(spte, shadow_trap_nonpresent_pte);
 			spte = NULL;
 			write_protected = 1;
 		}
@@ -694,7 +694,7 @@ static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
 		BUG_ON(!(*spte & PT_PRESENT_MASK));
 		rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
 		rmap_remove(kvm, spte);
-		set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+		__set_spte(spte, shadow_trap_nonpresent_pte);
 		need_tlb_flush = 1;
 	}
 	return need_tlb_flush;
@@ -1369,7 +1369,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 		}
 		BUG_ON(!parent_pte);
 		kvm_mmu_put_page(sp, parent_pte);
-		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
+		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
 	}
 }
 
@@ -1517,7 +1517,7 @@ static void mmu_convert_notrap(struct kvm_mmu_page *sp)
 
 	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
 		if (pt[i] == shadow_notrap_nonpresent_pte)
-			set_shadow_pte(&pt[i], shadow_trap_nonpresent_pte);
+			__set_spte(&pt[i], shadow_trap_nonpresent_pte);
 	}
 }
 
@@ -1683,7 +1683,7 @@ static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
 	return 0;
 }
 
-static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		    unsigned pte_access, int user_fault,
 		    int write_fault, int dirty, int largepage,
 		    gfn_t gfn, pfn_t pfn, bool speculative,
@@ -1733,7 +1733,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		 * is responsibility of mmu_get_page / kvm_sync_page.
 		 * Same reasoning can be applied to dirty page accounting.
 		 */
-		if (!can_unsync && is_writeble_pte(*shadow_pte))
+		if (!can_unsync && is_writeble_pte(*sptep))
 			goto set_pte;
 
 		if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
@@ -1750,62 +1750,62 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	mark_page_dirty(vcpu->kvm, gfn);
 
 set_pte:
-	set_shadow_pte(shadow_pte, spte);
+	__set_spte(sptep, spte);
 	return ret;
 }
 
-static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
+static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 			 unsigned pt_access, unsigned pte_access,
 			 int user_fault, int write_fault, int dirty,
 			 int *ptwrite, int largepage, gfn_t gfn,
 			 pfn_t pfn, bool speculative)
 {
 	int was_rmapped = 0;
-	int was_writeble = is_writeble_pte(*shadow_pte);
+	int was_writeble = is_writeble_pte(*sptep);
 	int rmap_count;
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
-		 __func__, *shadow_pte, pt_access,
+		 __func__, *sptep, pt_access,
 		 write_fault, user_fault, gfn);
 
-	if (is_rmap_spte(*shadow_pte)) {
+	if (is_rmap_spte(*sptep)) {
 		/*
 		 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
 		 * the parent of the now unreachable PTE.
 		 */
-		if (largepage && !is_large_pte(*shadow_pte)) {
+		if (largepage && !is_large_pte(*sptep)) {
 			struct kvm_mmu_page *child;
-			u64 pte = *shadow_pte;
+			u64 pte = *sptep;
 
 			child = page_header(pte & PT64_BASE_ADDR_MASK);
-			mmu_page_remove_parent_pte(child, shadow_pte);
-		} else if (pfn != spte_to_pfn(*shadow_pte)) {
+			mmu_page_remove_parent_pte(child, sptep);
+		} else if (pfn != spte_to_pfn(*sptep)) {
 			pgprintk("hfn old %lx new %lx\n",
-				 spte_to_pfn(*shadow_pte), pfn);
-			rmap_remove(vcpu->kvm, shadow_pte);
+				 spte_to_pfn(*sptep), pfn);
+			rmap_remove(vcpu->kvm, sptep);
 		} else
 			was_rmapped = 1;
 	}
-	if (set_spte(vcpu, shadow_pte, pte_access, user_fault, write_fault,
+	if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
 		      dirty, largepage, gfn, pfn, speculative, true)) {
 		if (write_fault)
 			*ptwrite = 1;
 		kvm_x86_ops->tlb_flush(vcpu);
 	}
 
-	pgprintk("%s: setting spte %llx\n", __func__, *shadow_pte);
+	pgprintk("%s: setting spte %llx\n", __func__, *sptep);
 	pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
-		 is_large_pte(*shadow_pte)? "2MB" : "4kB",
-		 is_present_pte(*shadow_pte)?"RW":"R", gfn,
-		 *shadow_pte, shadow_pte);
-	if (!was_rmapped && is_large_pte(*shadow_pte))
+		 is_large_pte(*sptep)? "2MB" : "4kB",
+		 is_present_pte(*sptep)?"RW":"R", gfn,
+		 *shadow_pte, sptep);
+	if (!was_rmapped && is_large_pte(*sptep))
 		++vcpu->kvm->stat.lpages;
 
-	page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
+	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
-		rmap_count = rmap_add(vcpu, shadow_pte, gfn, largepage);
-		if (!is_rmap_spte(*shadow_pte))
+		rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
+		if (!is_rmap_spte(*sptep))
 			kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, gfn, largepage);
@@ -1854,10 +1854,10 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
 				return -ENOMEM;
 			}
 
-			set_shadow_pte(iterator.sptep,
-				       __pa(sp->spt)
-				       | PT_PRESENT_MASK | PT_WRITABLE_MASK
-				       | shadow_user_mask | shadow_x_mask);
+			__set_spte(iterator.sptep,
+				   __pa(sp->spt)
+				   | PT_PRESENT_MASK | PT_WRITABLE_MASK
+				   | shadow_user_mask | shadow_x_mask);
 		}
 	}
 	return pt_write;
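
The __set_spte() call in this hunk installs a non-leaf entry: the physical address of the freshly allocated shadow page table ORed with permissive intermediate-level bits. A sketch of that composition, using placeholder bit positions rather than the real x86 mask definitions:

	/* Sketch only: composing a non-leaf shadow entry as in the hunk above. */
	#include <stdint.h>

	#define PT_PRESENT_MASK		(1ull << 0)	/* placeholder bit positions */
	#define PT_WRITABLE_MASK	(1ull << 1)
	#define SHADOW_USER_MASK	(1ull << 2)
	#define SHADOW_X_MASK		0ull		/* may be zero depending on the mode */

	static uint64_t make_nonleaf_spte(uint64_t shadow_page_pa)
	{
		/* next-level table address plus permissive intermediate bits */
		return shadow_page_pa | PT_PRESENT_MASK | PT_WRITABLE_MASK
				      | SHADOW_USER_MASK | SHADOW_X_MASK;
	}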
@@ -2389,7 +2389,7 @@ static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
 			mmu_page_remove_parent_pte(child, spte);
 		}
 	}
-	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
+	__set_spte(spte, shadow_trap_nonpresent_pte);
 	if (is_large_pte(pte))
 		--vcpu->kvm->stat.lpages;
 }
@@ -3125,7 +3125,7 @@ static int count_rmaps(struct kvm_vcpu *vcpu)
 		d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
 		while (d) {
 			for (k = 0; k < RMAP_EXT; ++k)
-				if (d->shadow_ptes[k])
+				if (d->sptes[k])
 					++nmaps;
 				else
 					break;
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 238a193bbf5b..322e8113aeea 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -253,7 +253,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 	gpte = *(const pt_element_t *)pte;
 	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK)) {
 		if (!is_present_gpte(gpte))
-			set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
+			__set_spte(spte, shadow_notrap_nonpresent_pte);
 		return;
 	}
 	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
@@ -311,7 +311,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 		if (is_large_pte(*sptep)) {
 			rmap_remove(vcpu->kvm, sptep);
-			set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+			__set_spte(sptep, shadow_trap_nonpresent_pte);
 			kvm_flush_remote_tlbs(vcpu->kvm);
 		}
 
@@ -369,7 +369,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	int user_fault = error_code & PFERR_USER_MASK;
 	int fetch_fault = error_code & PFERR_FETCH_MASK;
 	struct guest_walker walker;
-	u64 *shadow_pte;
+	u64 *sptep;
 	int write_pt = 0;
 	int r;
 	pfn_t pfn;
@@ -422,11 +422,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 	kvm_mmu_free_some_pages(vcpu);
-	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-				  largepage, &write_pt, pfn);
+	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
+			     largepage, &write_pt, pfn);
 
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
-		 shadow_pte, *shadow_pte, write_pt);
+		 sptep, *sptep, write_pt);
 
 	if (!write_pt)
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
@@ -472,7 +472,7 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 			--vcpu->kvm->stat.lpages;
 			need_flush = 1;
 		}
-		set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
+		__set_spte(sptep, shadow_trap_nonpresent_pte);
 		break;
 	}
 
@@ -583,7 +583,7 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 				nonpresent = shadow_trap_nonpresent_pte;
 			else
 				nonpresent = shadow_notrap_nonpresent_pte;
-			set_shadow_pte(&sp->spt[i], nonpresent);
+			__set_spte(&sp->spt[i], nonpresent);
 			continue;
 		}
 