author     Harvey Harrison <harvey.harrison@gmail.com>  2008-03-03 15:59:56 -0500
committer  Avi Kivity <avi@qumranet.com>                2008-04-27 04:53:27 -0400
commit     b8688d51bbe4872fbcec751e04369606082ac610 (patch)
tree       a48191f442d49530265dc92e98fc4877d2e286f7 /arch/x86/kvm/mmu.c
parent     71c4dfafc0932d92cc99c7e839d25174b0ce10a1 (diff)
KVM: replace remaining __FUNCTION__ occurrences
__FUNCTION__ is gcc-specific; use __func__ instead.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
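For context, __func__ is a predefined identifier in standard C99, while __FUNCTION__ is an older gcc extension kept only for backward compatibility. A minimal standalone sketch of the substitution (hypothetical example, not part of the patch):

#include <stdio.h>

/* Hypothetical example, not from the patch. __func__ is a C99
 * predefined identifier that expands to the name of the enclosing
 * function; __FUNCTION__ is the gcc-specific spelling that this
 * patch removes from the KVM MMU code.
 */
static void report(void)
{
	printf("%s: portable C99 spelling\n", __func__);
	printf("%s: gcc-specific spelling\n", __FUNCTION__);
}

int main(void)
{
	report();
	return 0;
}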
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 35 +++++++++++++++++------------------
1 file changed, 17 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1932a3aeda1d..414405b6ec13 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -649,7 +649,7 @@ static int is_empty_shadow_page(u64 *spt)
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 		if (*pos != shadow_trap_nonpresent_pte) {
-			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
 		}
@@ -772,14 +772,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical
 		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
-				 __FUNCTION__, sp->role.word);
+				 __func__, sp->role.word);
 			return sp;
 		}
 	return NULL;
@@ -810,21 +810,21 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+	pgprintk("%s: looking gfn %lx role %x\n", __func__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-			pgprintk("%s: found\n", __FUNCTION__);
+			pgprintk("%s: found\n", __func__);
 			return sp;
 		}
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
 		return sp;
-	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
@@ -960,13 +960,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	struct hlist_node *node, *n;
 	int r;
 
-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
-			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			kvm_mmu_zap_page(kvm, sp);
 			r = 1;
@@ -979,7 +979,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 	struct kvm_mmu_page *sp;
 
 	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
 		kvm_mmu_zap_page(kvm, sp);
 	}
 }
@@ -1021,7 +1021,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
-		 __FUNCTION__, *shadow_pte, pt_access,
+		 __func__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);
 
 	if (is_rmap_pte(*shadow_pte)) {
@@ -1047,7 +1047,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 
-
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1081,7 +1080,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 	if (shadow ||
 	   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
 		pgprintk("%s: found shadow page for %lx, marking ro\n",
-			 __FUNCTION__, gfn);
+			 __func__, gfn);
 		pte_access &= ~ACC_WRITE_MASK;
 		if (is_writeble_pte(spte)) {
 			spte &= ~PT_WRITABLE_MASK;
@@ -1097,7 +1096,7 @@ unshadowed:
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
-	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	pgprintk("%s: setting spte %llx\n", __func__, spte);
 	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
 		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
 		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
@@ -1317,7 +1316,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gfn_t gfn;
 	int r;
 
-	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
+	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -1395,7 +1394,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
 	mmu_free_roots(vcpu);
 }
 
@@ -1691,7 +1690,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int npte;
 	int r;
 
-	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
@@ -2139,7 +2138,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 
 	if (n_rmap != n_actual)
 		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
-		       __FUNCTION__, audit_msg, n_rmap, n_actual);
+		       __func__, audit_msg, n_rmap, n_actual);
 }
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -2159,7 +2158,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
 			       " mappings: gfn %lx role %x\n",
-			       __FUNCTION__, audit_msg, sp->gfn,
+			       __func__, audit_msg, sp->gfn,
 			       sp->role.word);
 	}
 }