Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/ioapic.c    2
-rw-r--r--  virt/kvm/ioapic.h    1
-rw-r--r--  virt/kvm/kvm_main.c  35
3 files changed, 19 insertions, 19 deletions
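
Every hunk below applies the same cleanup: functions with no callers outside their translation unit are made static, their now-unneeded header declarations and EXPORT_SYMBOL_GPL() exports are dropped (kvm_set_page_dirty is removed outright as unused), and static forward declarations are added where a function is called before its definition. A minimal, self-contained sketch of that pattern, assuming hypothetical names (helper and caller are illustration only, not KVM code):

#include <stdio.h>

/* Forward declaration: helper() is used by caller() before it is defined,
 * mirroring the update_memslots()/kvm_release_pfn_dirty() declarations
 * added at the top of kvm_main.c in this diff. */
static void helper(int value);

void caller(void)
{
	helper(42);
}

/* Definition appears later in the file; "static" limits linkage to this
 * translation unit, so no header declaration or export is needed. */
static void helper(int value)
{
	printf("value = %d\n", value);
}

int main(void)
{
	caller();
	return 0;
}
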
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 2d682977ce82..ce9ed99ad7dc 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -520,7 +520,7 @@ static int ioapic_mmio_write(struct kvm_io_device *this, gpa_t addr, int len,
 	return 0;
 }
 
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
+static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
 {
 	int i;
 
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 615d8c995c3c..90d43e95dcf8 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -91,7 +91,6 @@ void kvm_ioapic_destroy(struct kvm *kvm);
 int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
 		       int level, bool line_status);
 void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
-void kvm_ioapic_reset(struct kvm_ioapic *ioapic);
 int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 		struct kvm_lapic_irq *irq, unsigned long *dest_map);
 int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3efba97bdce2..e7c6ddd8ecc0 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -95,6 +95,12 @@ static int hardware_enable_all(void);
 static void hardware_disable_all(void);
 
 static void kvm_io_bus_destroy(struct kvm_io_bus *bus);
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new, u64 last_generation);
+
+static void kvm_release_pfn_dirty(pfn_t pfn);
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot, gfn_t gfn);
 
 bool kvm_rebooting;
 EXPORT_SYMBOL_GPL(kvm_rebooting);
@@ -553,7 +559,7 @@ static void kvm_free_physmem_slot(struct kvm *kvm, struct kvm_memory_slot *free,
 	free->npages = 0;
 }
 
-void kvm_free_physmem(struct kvm *kvm)
+static void kvm_free_physmem(struct kvm *kvm)
 {
 	struct kvm_memslots *slots = kvm->memslots;
 	struct kvm_memory_slot *memslot;
@@ -675,8 +681,9 @@ static void sort_memslots(struct kvm_memslots *slots)
 		slots->id_to_index[slots->memslots[i].id] = i;
 }
 
-void update_memslots(struct kvm_memslots *slots, struct kvm_memory_slot *new,
-		     u64 last_generation)
+static void update_memslots(struct kvm_memslots *slots,
+			    struct kvm_memory_slot *new,
+			    u64 last_generation)
 {
 	if (new) {
 		int id = new->id;
@@ -924,8 +931,8 @@ int kvm_set_memory_region(struct kvm *kvm,
 }
 EXPORT_SYMBOL_GPL(kvm_set_memory_region);
 
-int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
-				   struct kvm_userspace_memory_region *mem)
+static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
+					  struct kvm_userspace_memory_region *mem)
 {
 	if (mem->slot >= KVM_USER_MEM_SLOTS)
 		return -EINVAL;
@@ -1047,7 +1054,7 @@ static unsigned long gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn,
 }
 
 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot,
-			gfn_t gfn)
+				 gfn_t gfn)
 {
 	return gfn_to_hva_many(slot, gfn, NULL);
 }
@@ -1387,18 +1394,11 @@ void kvm_release_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
 
-void kvm_release_pfn_dirty(pfn_t pfn)
+static void kvm_release_pfn_dirty(pfn_t pfn)
 {
 	kvm_set_pfn_dirty(pfn);
 	kvm_release_pfn_clean(pfn);
 }
-EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
-
-void kvm_set_page_dirty(struct page *page)
-{
-	kvm_set_pfn_dirty(page_to_pfn(page));
-}
-EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
@@ -1640,8 +1640,9 @@ int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
 }
 EXPORT_SYMBOL_GPL(kvm_clear_guest);
 
-void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
-			     gfn_t gfn)
+static void mark_page_dirty_in_slot(struct kvm *kvm,
+				    struct kvm_memory_slot *memslot,
+				    gfn_t gfn)
 {
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
@@ -1757,7 +1758,7 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
  * locking does not harm. It may result in trying to yield to same VCPU, fail
  * and continue with next VCPU and so on.
  */
-bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
+static bool kvm_vcpu_eligible_for_directed_yield(struct kvm_vcpu *vcpu)
 {
 	bool eligible;
 
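
The comment in the last hunk explains why kvm_vcpu_eligible_for_directed_yield() may read candidate state without locking: a stale read at worst picks a bad target, the yield fails, and the caller moves on to the next VCPU. A hypothetical, self-contained illustration of that tolerate-stale-reads idea, assuming invented names (struct candidate and looks_eligible are sketch-only, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct candidate {
	atomic_bool busy;	/* written by other threads, read without a lock */
};

/*
 * Racy read: a relaxed load suffices because a stale answer is harmless
 * here -- a failed yield attempt just means the caller tries the next
 * candidate, exactly as the comment above describes.
 */
static bool looks_eligible(struct candidate *c)
{
	return !atomic_load_explicit(&c->busy, memory_order_relaxed);
}

int main(void)
{
	struct candidate c = { .busy = false };
	printf("eligible: %d\n", looks_eligible(&c));
	return 0;
}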