author     Stephen Hemminger <stephen@networkplumber.org>   2013-12-29 15:12:29 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>            2014-01-08 16:02:58 -0500
commit     7940876e1330671708186ac3386aa521ffb5c182 (patch)
tree       abe06165870e77111ba9e3d1b5f900c1cd8c4cef
parent     2f0a6397dd3cac2fb05b46cad08c1d532c04d6b8 (diff)
kvm: make local functions static
Running 'make namespacecheck' found lots of functions that
should be declared static, since they are only used in one file.
Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
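
[Editor's note: 'make namespacecheck' drives scripts/namespace.pl over the
built object tree and lists global symbols referenced from only one file,
i.e. candidates for internal linkage. A minimal sketch of the transformation
each flagged function receives follows; helper(), foo.c, and foo.h are
hypothetical stand-ins, not code from this patch:

    /* foo.h -- before: the declaration makes helper() visible to every
     * includer, even though only foo.c actually calls it. */
    void helper(void);

    /* foo.c -- after: the header declaration is deleted and the
     * definition gains internal linkage.  If callers appear earlier in
     * the file than the definition, a static forward declaration is
     * added near the top of the file, as this patch does in kvm_main.c
     * for update_memslots() and friends. */
    static void helper(void)
    {
            /* ... */
    }

Besides shrinking the kernel's global namespace, the static qualifier lets
the compiler inline or discard the function when a configuration never
calls it.]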
 include/linux/kvm_host.h | 16 ----------------
 virt/kvm/ioapic.c        |  2 +-
 virt/kvm/ioapic.h        |  1 -
 virt/kvm/kvm_main.c      | 35 +++++++++++++++++-----------------
 4 files changed, 19 insertions(+), 35 deletions(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 1f46f66f60ab..4306c5608f6d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -463,8 +463,6 @@ void kvm_exit(void);
 
 void kvm_get_kvm(struct kvm *kvm);
 void kvm_put_kvm(struct kvm *kvm);
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-		     u64 last_generation);
 
 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
 {
@@ -537,7 +535,6 @@ unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
-void kvm_set_page_dirty(struct page *page);
 void kvm_set_page_accessed(struct page *page);
 
 pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
@@ -549,7 +546,6 @@ pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
 pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
 pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
 
-void kvm_release_pfn_dirty(pfn_t pfn);
 void kvm_release_pfn_clean(pfn_t pfn);
 void kvm_set_pfn_dirty(pfn_t pfn);
 void kvm_set_pfn_accessed(pfn_t pfn);
@@ -576,8 +572,6 @@ struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			     gfn_t gfn);
 
 void kvm_vcpu_block(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
@@ -604,8 +598,6 @@ int kvm_get_dirty_log(struct kvm *kvm,
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				struct kvm_dirty_log *log);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct kvm_userspace_memory_region *mem);
 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
 			  bool line_status);
 long kvm_arch_vm_ioctl(struct file *filp,
@@ -653,8 +645,6 @@ void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
-void kvm_free_physmem(struct kvm *kvm);
-
 void *kvm_kvzalloc(unsigned long size);
 void kvm_kvfree(const void *addr);
 
@@ -1097,12 +1087,6 @@ static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
 static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
 {
 }
-
-static inline bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
-{
-	return true;
-}
-
 #endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
 #endif
 
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 2d682977ce82..ce9ed99ad7dc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -520,7 +520,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	return 0;
 }
 
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
 	int i;
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 615d8c995c3c..90d43e95dcf8 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -91,7 +91,6 @@ void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3efba97bdce2..e7c6ddd8ecc0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,6 +95,12 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new, u64 last_generation);
+
+static void kvm_release_pfn_dirty(pfn_t pfn);
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot, gfn_t gfn);
 
 bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -553,7 +559,7 @@ static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
 	free->npages = 0;
 }
 
-void kvm_free_physmem(struct kvm *kvm)
+static void kvm_free_physmem(struct kvm *kvm)
 {
 	struct kvm_memslots *slots = kvm->memslots;
 	struct kvm_memory_slot *memslot;
@@ -675,8 +681,9 @@ static void sort_memslots(struct kvm_memslots *slots)
 		slots->id_to_index[slots->memslots[i].id] = i;
 }
 
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-		     u64 last_generation)
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new,
+			    u64 last_generation)
 {
 	if (new) {
 		int id = new->id;
@@ -924,8 +931,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct kvm_userspace_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+					  struct kvm_userspace_memory_region *mem)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
@@ -1047,7 +1054,7 @@ static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-				 gfn_t gfn)
+					gfn_t gfn)
 {
 	return gfn_to_hva_many(slot, gfn, NULL);
 }
@@ -1387,18 +1394,11 @@ void kvm_release_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-void kvm_release_pfn_dirty(pfn_t pfn)
+static void kvm_release_pfn_dirty(pfn_t pfn)
 {
 	kvm_set_pfn_dirty(pfn);
 	kvm_release_pfn_clean(pfn);
 }
-EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
-
-void kvm_set_page_dirty(struct page *page)
-{
-	kvm_set_pfn_dirty(page_to_pfn(page));
-}
-EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
@@ -1640,8 +1640,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			     gfn_t gfn)
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot,
+				    gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1757,7 +1758,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
  * locking does not harm. It may result in trying to yield to same VCPU, fail
  * and continue with next VCPU and so on.
  */
-bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 {
 	bool eligible;
 