author     Harvey Harrison <harvey.harrison@gmail.com>   2008-03-03 15:59:56 -0500
committer  Avi Kivity <avi@qumranet.com>                 2008-04-27 04:53:27 -0400
commit     b8688d51bbe4872fbcec751e04369606082ac610 (patch)
tree       a48191f442d49530265dc92e98fc4877d2e286f7 /arch/x86/kvm
parent     71c4dfafc0932d92cc99c7e839d25174b0ce10a1 (diff)
KVM: replace remaining __FUNCTION__ occurrences
__FUNCTION__ is a gcc-specific extension; use the C99-standard __func__ instead.
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
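
For context: C99 defines __func__ as a predefined identifier available inside every function body, whereas __FUNCTION__ is an older gcc extension kept only for backward compatibility. A minimal standalone sketch of the portable form (the function name below is illustrative, not taken from this patch):

    #include <stdio.h>

    /* C99 defines __func__ inside every function body as if by:
     *     static const char __func__[] = "function-name";
     * __FUNCTION__ is a gcc extension with the same value, retained
     * only for backward compatibility.
     */
    static void start_timer(void)   /* illustrative name */
    {
            /* Prints "start_timer: timer armed" */
            printf("%s: timer armed\n", __func__);
    }

    int main(void)
    {
            start_timer();
            return 0;
    }

Neither identifier is a macro that expands to a string literal (gcc dropped literal concatenation for __FUNCTION__ long ago), so both must be passed as "%s" arguments, which is exactly how every call site touched by this patch already uses them.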
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--   arch/x86/kvm/lapic.c         8
-rw-r--r--   arch/x86/kvm/mmu.c          35
-rw-r--r--   arch/x86/kvm/paging_tmpl.h  14
-rw-r--r--   arch/x86/kvm/svm.c          14
-rw-r--r--   arch/x86/kvm/vmx.c           6
-rw-r--r--   arch/x86/kvm/x86.c          12
6 files changed, 44 insertions, 45 deletions
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 68a6b1511934..31280df7d2e3 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -658,7 +658,7 @@ static void start_apic_timer(struct kvm_lapic *apic)
 	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
 		   PRIx64 ", "
 		   "timer initial count 0x%x, period %lldns, "
-		   "expire @ 0x%016" PRIx64 ".\n", __FUNCTION__,
+		   "expire @ 0x%016" PRIx64 ".\n", __func__,
 		   APIC_BUS_CYCLE_NS, ktime_to_ns(now),
 		   apic_get_reg(apic, APIC_TMICT),
 		   apic->timer.period,
@@ -691,7 +691,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
 	/* too common printing */
 	if (offset != APIC_EOI)
 		apic_debug("%s: offset 0x%x with length 0x%x, and value is "
-			   "0x%x\n", __FUNCTION__, offset, len, val);
+			   "0x%x\n", __func__, offset, len, val);
 
 	offset &= 0xff0;
 
@@ -869,7 +869,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	struct kvm_lapic *apic;
 	int i;
 
-	apic_debug("%s\n", __FUNCTION__);
+	apic_debug("%s\n", __func__);
 
 	ASSERT(vcpu);
 	apic = vcpu->arch.apic;
@@ -907,7 +907,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 	apic_update_ppr(apic);
 
 	apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
-		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
+		   "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
 		   vcpu, kvm_apic_id(apic),
 		   vcpu->arch.apic_base, apic->base_address);
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 1932a3aeda1d..414405b6ec13 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -649,7 +649,7 @@ static int is_empty_shadow_page(u64 *spt)
 
 	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
 		if (*pos != shadow_trap_nonpresent_pte) {
-			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
+			printk(KERN_ERR "%s: %p %llx\n", __func__,
 			       pos, *pos);
 			return 0;
 		}
@@ -772,14 +772,14 @@ static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
 	struct kvm_mmu_page *sp;
 	struct hlist_node *node;
 
-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical
 		    && !sp->role.invalid) {
 			pgprintk("%s: found role %x\n",
-				 __FUNCTION__, sp->role.word);
+				 __func__, sp->role.word);
 			return sp;
 		}
 	return NULL;
@@ -810,21 +810,21 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
+	pgprintk("%s: looking gfn %lx role %x\n", __func__,
 		 gfn, role.word);
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &vcpu->kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry(sp, node, bucket, hash_link)
 		if (sp->gfn == gfn && sp->role.word == role.word) {
 			mmu_page_add_parent_pte(vcpu, sp, parent_pte);
-			pgprintk("%s: found\n", __FUNCTION__);
+			pgprintk("%s: found\n", __func__);
 			return sp;
 		}
 	++vcpu->kvm->stat.mmu_cache_miss;
 	sp = kvm_mmu_alloc_page(vcpu, parent_pte);
 	if (!sp)
 		return sp;
-	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
+	pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
 	sp->gfn = gfn;
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
@@ -960,13 +960,13 @@ static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
 	struct hlist_node *node, *n;
 	int r;
 
-	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
+	pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
 	r = 0;
 	index = kvm_page_table_hashfn(gfn);
 	bucket = &kvm->arch.mmu_page_hash[index];
 	hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
 		if (sp->gfn == gfn && !sp->role.metaphysical) {
-			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
+			pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
 				 sp->role.word);
 			kvm_mmu_zap_page(kvm, sp);
 			r = 1;
@@ -979,7 +979,7 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
 	struct kvm_mmu_page *sp;
 
 	while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
-		pgprintk("%s: zap %lx %x\n", __FUNCTION__, gfn, sp->role.word);
+		pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
 		kvm_mmu_zap_page(kvm, sp);
 	}
 }
@@ -1021,7 +1021,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 
 	pgprintk("%s: spte %llx access %x write_fault %d"
 		 " user_fault %d gfn %lx\n",
-		 __FUNCTION__, *shadow_pte, pt_access,
+		 __func__, *shadow_pte, pt_access,
 		 write_fault, user_fault, gfn);
 
 	if (is_rmap_pte(*shadow_pte)) {
@@ -1047,7 +1047,6 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		}
 	}
 
-
 	/*
 	 * We don't set the accessed bit, since we sometimes want to see
 	 * whether the guest actually used the pte (in order to detect
@@ -1081,7 +1080,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 		if (shadow ||
 		   (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
 			pgprintk("%s: found shadow page for %lx, marking ro\n",
-				 __FUNCTION__, gfn);
+				 __func__, gfn);
 			pte_access &= ~ACC_WRITE_MASK;
 			if (is_writeble_pte(spte)) {
 				spte &= ~PT_WRITABLE_MASK;
@@ -1097,7 +1096,7 @@ unshadowed:
 	if (pte_access & ACC_WRITE_MASK)
 		mark_page_dirty(vcpu->kvm, gfn);
 
-	pgprintk("%s: setting spte %llx\n", __FUNCTION__, spte);
+	pgprintk("%s: setting spte %llx\n", __func__, spte);
 	pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
 		 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
 		 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
@@ -1317,7 +1316,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 	gfn_t gfn;
 	int r;
 
-	pgprintk("%s: gva %lx error %x\n", __FUNCTION__, gva, error_code);
+	pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
 	r = mmu_topup_memory_caches(vcpu);
 	if (r)
 		return r;
@@ -1395,7 +1394,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 
 static void paging_new_cr3(struct kvm_vcpu *vcpu)
 {
-	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->arch.cr3);
+	pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
 	mmu_free_roots(vcpu);
 }
 
@@ -1691,7 +1690,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int npte;
 	int r;
 
-	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
+	pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
 	mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
@@ -2139,7 +2138,7 @@ static void audit_rmap(struct kvm_vcpu *vcpu)
 
 	if (n_rmap != n_actual)
 		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
-		       __FUNCTION__, audit_msg, n_rmap, n_actual);
+		       __func__, audit_msg, n_rmap, n_actual);
 }
 
 static void audit_write_protection(struct kvm_vcpu *vcpu)
@@ -2159,7 +2158,7 @@ static void audit_write_protection(struct kvm_vcpu *vcpu)
 		if (*rmapp)
 			printk(KERN_ERR "%s: (%s) shadow page has writable"
 			       " mappings: gfn %lx role %x\n",
-			       __FUNCTION__, audit_msg, sp->gfn,
+			       __func__, audit_msg, sp->gfn,
 			       sp->role.word);
 	}
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 17f9d160ca34..57abbd091143 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -130,7 +130,7 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 	unsigned index, pt_access, pte_access;
 	gpa_t pte_gpa;
 
-	pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
+	pgprintk("%s: addr %lx\n", __func__, addr);
 walk:
 	walker->level = vcpu->arch.mmu.root_level;
 	pte = vcpu->arch.cr3;
@@ -155,7 +155,7 @@ walk:
 		pte_gpa += index * sizeof(pt_element_t);
 		walker->table_gfn[walker->level - 1] = table_gfn;
 		walker->pte_gpa[walker->level - 1] = pte_gpa;
-		pgprintk("%s: table_gfn[%d] %lx\n", __FUNCTION__,
+		pgprintk("%s: table_gfn[%d] %lx\n", __func__,
 			 walker->level - 1, table_gfn);
 
 		kvm_read_guest(vcpu->kvm, pte_gpa, &pte, sizeof(pte));
@@ -222,7 +222,7 @@ walk:
 	walker->pt_access = pt_access;
 	walker->pte_access = pte_access;
 	pgprintk("%s: pte %llx pte_access %x pt_access %x\n",
-		 __FUNCTION__, (u64)pte, pt_access, pte_access);
+		 __func__, (u64)pte, pt_access, pte_access);
 	return 1;
 
 not_present:
@@ -256,7 +256,7 @@ static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
 		set_shadow_pte(spte, shadow_notrap_nonpresent_pte);
 		return;
 	}
-	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+	pgprintk("%s: gpte %llx spte %p\n", __func__, (u64)gpte, spte);
 	pte_access = page->role.access & FNAME(gpte_access)(vcpu, gpte);
 	if (gpte_to_gfn(gpte) != vcpu->arch.update_pte.gfn)
 		return;
@@ -381,7 +381,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	struct page *page;
 	int largepage = 0;
 
-	pgprintk("%s: addr %lx err %x\n", __FUNCTION__, addr, error_code);
+	pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
 	kvm_mmu_audit(vcpu, "pre page fault");
 
 	r = mmu_topup_memory_caches(vcpu);
@@ -399,7 +399,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	 * The page is not mapped by the guest. Let the guest handle it.
 	 */
 	if (!r) {
-		pgprintk("%s: guest page fault\n", __FUNCTION__);
+		pgprintk("%s: guest page fault\n", __func__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
 		up_read(&vcpu->kvm->slots_lock);
@@ -431,7 +431,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	shadow_pte = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
 				  largepage, &write_pt, page);
 
-	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __FUNCTION__,
+	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 shadow_pte, *shadow_pte, write_pt);
 
 	if (!write_pt)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ff6e5c8da3c6..b2c667fe6832 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -230,12 +230,12 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 	if (!svm->next_rip) {
-		printk(KERN_DEBUG "%s: NOP\n", __FUNCTION__);
+		printk(KERN_DEBUG "%s: NOP\n", __func__);
 		return;
 	}
 	if (svm->next_rip - svm->vmcb->save.rip > MAX_INST_SIZE)
 		printk(KERN_ERR "%s: ip 0x%llx next 0x%llx\n",
-		       __FUNCTION__,
+		       __func__,
 		       svm->vmcb->save.rip,
 		       svm->next_rip);
 
@@ -996,7 +996,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 	}
 	default:
 		printk(KERN_DEBUG "%s: unexpected dr %u\n",
-		       __FUNCTION__, dr);
+		       __func__, dr);
 		*exception = UD_VECTOR;
 		return;
 	}
@@ -1109,7 +1109,7 @@ static int invalid_op_interception(struct vcpu_svm *svm,
 static int task_switch_interception(struct vcpu_svm *svm,
 				    struct kvm_run *kvm_run)
 {
-	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __FUNCTION__);
+	pr_unimpl(&svm->vcpu, "%s: task switch is unsupported\n", __func__);
 	kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
 	return 0;
 }
@@ -1125,7 +1125,7 @@ static int emulate_on_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
 	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
-		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);
+		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
 	return 1;
 }
 
@@ -1257,7 +1257,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 	case MSR_IA32_DEBUGCTLMSR:
 		if (!svm_has(SVM_FEATURE_LBRV)) {
 			pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
-				  __FUNCTION__, data);
+				  __func__, data);
 			break;
 		}
 		if (data & DEBUGCTL_RESERVED_BITS)
@@ -1419,7 +1419,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	    exit_code != SVM_EXIT_NPF)
 		printk(KERN_ERR "%s: unexpected exit_ini_info 0x%x "
 		       "exit_code 0x%x\n",
-		       __FUNCTION__, svm->vmcb->control.exit_int_info,
+		       __func__, svm->vmcb->control.exit_int_info,
 		       exit_code);
 
 	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 50345032974d..7ef710afceba 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1254,7 +1254,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
 	guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
 	if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
 		printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
-		       __FUNCTION__);
+		       __func__);
 		vmcs_write32(GUEST_TR_AR_BYTES,
 			     (guest_tr_ar & ~AR_TYPE_MASK)
 			     | AR_TYPE_BUSY_64_TSS);
@@ -1909,7 +1909,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((vect_info & VECTORING_INFO_VALID_MASK) &&
 	    !is_page_fault(intr_info))
 		printk(KERN_ERR "%s: unexpected, vectoring info 0x%x "
-		       "intr info 0x%x\n", __FUNCTION__, vect_info, intr_info);
+		       "intr info 0x%x\n", __func__, vect_info, intr_info);
 
 	if (!irqchip_in_kernel(vcpu->kvm) && is_external_interrupt(vect_info)) {
 		int irq = vect_info & VECTORING_INFO_VECTOR_MASK;
@@ -2275,7 +2275,7 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	if ((vectoring_info & VECTORING_INFO_VALID_MASK) &&
 	    exit_reason != EXIT_REASON_EXCEPTION_NMI)
 		printk(KERN_WARNING "%s: unexpected, valid vectoring info and "
-		       "exit reason is 0x%x\n", __FUNCTION__, exit_reason);
+		       "exit reason is 0x%x\n", __func__, exit_reason);
 	if (exit_reason < kvm_vmx_max_exit_handlers
 	    && kvm_vmx_exit_handlers[exit_reason])
 		return kvm_vmx_exit_handlers[exit_reason](vcpu, kvm_run);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 491eda308289..bf78d6522d3d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -563,15 +563,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		break;
 	case MSR_IA32_MC0_STATUS:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MC0_STATUS 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_MCG_STATUS:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_STATUS 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_MCG_CTL:
 		pr_unimpl(vcpu, "%s: MSR_IA32_MCG_CTL 0x%llx, nop\n",
-			  __FUNCTION__, data);
+			  __func__, data);
 		break;
 	case MSR_IA32_UCODE_REV:
 	case MSR_IA32_UCODE_WRITE:
@@ -1939,7 +1939,7 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 		*dest = kvm_x86_ops->get_dr(vcpu, dr);
 		return X86EMUL_CONTINUE;
 	default:
-		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __FUNCTION__, dr);
+		pr_unimpl(vcpu, "%s: unexpected dr %u\n", __func__, dr);
 		return X86EMUL_UNHANDLEABLE;
 	}
 }
@@ -2486,7 +2486,7 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 	case 8:
 		return kvm_get_cr8(vcpu);
 	default:
-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
 		return 0;
 	}
 }
@@ -2512,7 +2512,7 @@ void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 		kvm_set_cr8(vcpu, val & 0xfUL);
 		break;
 	default:
-		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __FUNCTION__, cr);
+		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
 	}
 }
 
2518 | 2518 | ||