Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')

 arch/ia64/kvm/kvm-ia64.c | 80
 1 file changed, 44 insertions(+), 36 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -23,8 +23,8 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/percpu.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
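The include change tracks the gfp/slab header split: <linux/gfp.h> no longer drags in the slab allocator declarations, so a file that calls kfree()/kzalloc() (as this one does in kvm_arch_vm_ioctl() and kvm_arch_destroy_vm()) must include <linux/slab.h> directly. A minimal sketch of the dependency (hypothetical helper, not part of this patch):

	/* Sketch: kzalloc()/kfree() are declared in <linux/slab.h>, not <linux/gfp.h>. */
	#include <linux/slab.h>

	static void *make_buf(size_t len)
	{
		/* the GFP_KERNEL flag itself still comes from gfp.h, pulled in by slab.h */
		return kzalloc(len, GFP_KERNEL);
	}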
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
 
 static DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
 	long  status;
 	long  tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
 	local_irq_restore(saved_psr);
 	if (slot < 0)
-		return;
+		return -EINVAL;
 
 	spin_lock(&vp_lock);
 	status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
 			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
-		return ;
+		return -EINVAL;
 	}
 
 	if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
 	}
 	spin_unlock(&vp_lock);
 	ia64_ptr_entry(0x3, slot);
+
+	return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
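kvm_arch_hardware_enable() now reports failure instead of returning void, so the generic enable path can notice CPUs on which VT could not be switched on. A minimal sketch of how a caller might propagate the result (hypothetical caller, assuming kernel context):

	/* Sketch: a per-CPU enable path that can now observe arch failures. */
	static void hardware_enable(void *junk)
	{
		int r = kvm_arch_hardware_enable(NULL);

		if (r)	/* e.g. -EINVAL when ia64_itr_entry() or PAL VT init fails above */
			printk(KERN_INFO "kvm: enabling virtualization on CPU%d failed\n",
			       raw_smp_processor_id());
	}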
@@ -239,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 mmio:
 	if (p->dir)
-		r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				    p->size, &p->data);
 	else
-		r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				     p->size, &p->data);
 	if (r)
 		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
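kvm_io_bus_read()/kvm_io_bus_write() now take the struct kvm pointer plus a bus identifier instead of a raw pointer into struct kvm, which lets the generic code look up the bus under its own synchronization rather than trusting the caller's locking. A sketch of the new call shape for an MMIO store (hypothetical wrapper, assuming the signatures used above):

	/* Sketch: emulate a guest MMIO store via the bus-id based API. */
	static int emulate_mmio_store(struct kvm *kvm, gpa_t addr, int len, void *val)
	{
		/* returns 0 if some device registered on KVM_MMIO_BUS claimed the access */
		return kvm_io_bus_write(kvm, KVM_MMIO_BUS, addr, len, val);
	}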
@@ -634,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
-	int r;
+	int r, idx;
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 again:
 	if (signal_pending(current)) {
@@ -661,7 +660,7 @@ again:
 	if (r < 0)
 		goto vcpu_run_fail;
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	kvm_guest_enter();
 
 	/*
@@ -685,7 +684,7 @@ again:
 	kvm_guest_exit();
 	preempt_enable();
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	r = kvm_handle_exit(kvm_run, vcpu);
 
@@ -695,10 +694,10 @@ again:
 	}
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
 		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
 
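The __vcpu_run() hunks are the heart of the conversion: the memslot reader no longer takes slots_lock but enters an SRCU read-side critical section, and drops it around guest entry so that memslot writers are not stalled for the whole guest run. The read-side pattern, sketched in isolation (assuming a kernel context where &kvm->srcu has been initialized by common code):

	/* Sketch: SRCU read side protecting kvm->memslots, as used above. */
	static void walk_slots(struct kvm *kvm)
	{
		int idx = srcu_read_lock(&kvm->srcu);

		/* kvm->memslots may safely be dereferenced until the matching unlock;
		 * sleeping is allowed inside the section, unlike with plain RCU */
		srcu_read_unlock(&kvm->srcu, idx);	/* must pass back the same idx */
	}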
@@ -851,8 +850,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -868,9 +866,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(ioapic_irqchip(kvm),
-				&chip->chip.ioapic,
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
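The open-coded memcpy()s of struct kvm_ioapic_state give way to kvm_get_ioapic()/kvm_set_ioapic(), which keep the copy inside the common ioapic code, presumably under its own locking rather than racing with concurrent ioapic accesses. A sketch of the resulting call shape (hypothetical round-trip, mirroring the two hunks above):

	/* Sketch: snapshot and restore the in-kernel IOAPIC state. */
	static int roundtrip_ioapic(struct kvm *kvm)
	{
		struct kvm_ioapic_state state;
		int r;

		r = kvm_get_ioapic(kvm, &state);	/* copy out via common code */
		if (r)
			return r;
		return kvm_set_ioapic(kvm, &state);	/* copy back in */
	}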
@@ -944,7 +940,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r = -ENOTTY;
 
 	switch (ioctl) {
 	case KVM_SET_MEMORY_REGION: {
@@ -972,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
-			kfree(kvm->arch.vioapic);
+			kvm_ioapic_destroy(kvm);
 			goto out;
 		}
 		break;
@@ -985,10 +981,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
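Dropping irq_lock around kvm_set_irq() follows from the series moving irq routing lookups to their own synchronization, so the call is safe without the mutex. A sketch of raising and lowering a userspace-sourced line (same call as above; GSI number is a hypothetical example):

	/* Sketch: pulse GSI 10 from the userspace IRQ source, no irq_lock needed. */
	static void pulse_gsi10(struct kvm *kvm)
	{
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 10, 1);	/* assert */
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 10, 0);	/* deassert */
	}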
@@ -1380,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
 
 static void kvm_release_vm_pages(struct kvm *kvm)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i, j;
 	unsigned long base_gfn;
 
-	for (i = 0; i < kvm->nmemslots; i++) {
-		memslot = &kvm->memslots[i];
+	slots = rcu_dereference(kvm->memslots);
+	for (i = 0; i < slots->nmemslots; i++) {
+		memslot = &slots->memslots[i];
 		base_gfn = memslot->base_gfn;
 
 		for (j = 0; j < memslot->npages; j++) {
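kvm->memslots is now a pointer to a struct kvm_memslots that writers replace wholesale, so readers fetch it once through rcu_dereference() and iterate that snapshot, as kvm_release_vm_pages() does above (here during VM teardown, when no concurrent updaters remain). Sketched in isolation (assuming the caller holds an RCU/SRCU read-side section or otherwise excludes updates):

	/* Sketch: iterate one consistent snapshot of the memslot array. */
	static unsigned long count_guest_pages(struct kvm *kvm)
	{
		struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
		unsigned long total = 0;
		int i;

		for (i = 0; i < slots->nmemslots; i++)
			total += slots->memslots[i].npages;
		return total;
	}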
@@ -1408,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
 	free_kvm(kvm);
 }
 
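cleanup_srcu_struct() releases the per-CPU state an SRCU domain allocates, pairing with the init_srcu_struct() call made when the VM is created (in common KVM code, not shown in this diff). The lifecycle, sketched with a local domain for illustration:

	/* Sketch: an SRCU domain must be initialized before use and cleaned up after. */
	static int srcu_lifecycle_demo(void)
	{
		struct srcu_struct srcu;
		int r = init_srcu_struct(&srcu);	/* allocates per-CPU state; can fail */

		if (r)
			return r;
		/* ... srcu_read_lock()/unlock() and synchronize_srcu() may run here ... */
		cleanup_srcu_struct(&srcu);		/* must not race with readers */
		return 0;
	}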
@@ -1579,15 +1576,15 @@ out:
 	return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
+		struct kvm_userspace_memory_region *mem,
 		int user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+	int npages = memslot->npages;
 	unsigned long base_gfn = memslot->base_gfn;
 
 	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1611,6 +1608,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		struct kvm_memory_slot old,
+		int user_alloc)
+{
+	return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
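kvm_arch_set_memory_region() is split into a prepare/commit pair: generic code calls kvm_arch_prepare_memory_region() while the old memslots are still published (so the architecture can veto the update), installs the new slot array, then calls kvm_arch_commit_memory_region(), which ia64 leaves empty. A simplified sketch of the ordering this enables on the generic side (the real logic lives in virt/kvm/kvm_main.c; this is not its literal code):

	/* Sketch (simplified): the two-phase memslot update the split enables. */
	static int update_memslot(struct kvm *kvm, struct kvm_memslots *new,
				  struct kvm_memory_slot *slot,
				  struct kvm_memory_slot old,
				  struct kvm_userspace_memory_region *mem,
				  int user_alloc)
	{
		int r = kvm_arch_prepare_memory_region(kvm, slot, old, mem, user_alloc);

		if (r)
			return r;			/* nothing published yet */
		rcu_assign_pointer(kvm->memslots, new);	/* publish the new slot array */
		synchronize_srcu(&kvm->srcu);		/* wait out old-array readers */
		kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
		return 0;
	}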
@@ -1797,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1805,12 +1811,12 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
 
-	memslot = &kvm->memslots[log->slot];
+	memslot = &kvm->memslots->memslots[log->slot];
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,10 +1832,11 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	mutex_lock(&kvm->slots_lock);
 	spin_lock(&kvm->arch.dirty_log_lock);
 
 	r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1843,12 +1850,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
-		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		memslot = &kvm->memslots->memslots[log->slot];
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
 out:
+	mutex_unlock(&kvm->slots_lock);
 	spin_unlock(&kvm->arch.dirty_log_lock);
 	return r;
 }
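kvm_dirty_bitmap_bytes() centralizes the bitmap-size computation that both call sites open-coded, and widening n to unsigned long avoids truncation for very large slots. Judging from the expression it replaces, the helper amounts to the following (a sketch inferred from this diff, not the literal common-code definition):

	/* Sketch: equivalent of the open-coded size both hunks used to compute. */
	static unsigned long dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		/* one bit per page, rounded up to whole longs, expressed in bytes */
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}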