Diffstat (limited to 'arch/ia64/kvm/kvm-ia64.c')

 arch/ia64/kvm/kvm-ia64.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 42 insertions(+), 26 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 5fdeec5fddcf..21b701374f72 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -23,8 +23,8 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/percpu.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
@@ -144,6 +144,7 @@ int kvm_arch_hardware_enable(void *garbage)
 				VP_INIT_ENV : VP_INIT_ENV_INITALIZE,
 				__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
+		spin_unlock(&vp_lock);
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
 		return -EINVAL;
 	}
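
The added unlock fixes a lock leak: kvm_arch_hardware_enable() takes vp_lock before the PAL call, and the early return on failure previously left the spinlock held, so the next CPU through this path would deadlock. The rule being enforced, as a minimal sketch (init_vt_support() is a hypothetical stand-in for the PAL call above):

	spin_lock(&vp_lock);
	status = init_vt_support();	/* stands in for the ia64 PAL call */
	if (status != 0) {
		spin_unlock(&vp_lock);	/* the fix: never return with the lock held */
		return -EINVAL;
	}
	/* ... remaining setup under the lock ... */
	spin_unlock(&vp_lock);
	return 0;
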
@@ -241,10 +242,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	return 0;
 mmio:
 	if (p->dir)
-		r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				    p->size, &p->data);
 	else
-		r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				     p->size, &p->data);
 	if (r)
 		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
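
The io-bus helpers changed signature as part of the SRCU conversion: callers now pass the VM and a bus index instead of a pointer into struct kvm, so the helpers can look the bus up under srcu_read_lock() themselves. A hedged sketch of the new-style call (gpa, len, and data are illustrative names, not from this patch):

	/* Read path: a nonzero return means no device claimed the address. */
	r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, gpa, len, &data);
	if (r)
		printk(KERN_ERR "kvm: no iodevice at %lx\n", gpa);
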
@@ -636,12 +637,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
-	int r;
+	int r, idx;
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 again:
 	if (signal_pending(current)) {
@@ -663,7 +661,7 @@ again:
 	if (r < 0)
 		goto vcpu_run_fail;
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	kvm_guest_enter();
 
 	/*
@@ -687,7 +685,7 @@ again:
 	kvm_guest_exit();
 	preempt_enable();
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	r = kvm_handle_exit(kvm_run, vcpu);
 
@@ -697,10 +695,10 @@ again:
 	}
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
 		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
 
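
These four hunks convert the vcpu run loop from the slots_lock rwsem to SRCU for memslot access; srcu_read_lock() returns a token that must be handed back to srcu_read_unlock(), which is why idx threads through the function. The read-side pattern, sketched:

	int idx;

	idx = srcu_read_lock(&kvm->srcu);	/* enter read-side section */
	/*
	 * Memslots dereferenced here stay valid even if a writer
	 * publishes a new array meanwhile; the writer waits in
	 * synchronize_srcu() until readers like this one finish.
	 */
	srcu_read_unlock(&kvm->srcu, idx);	/* pairs with the token above */
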
@@ -971,7 +969,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		goto out;
 	r = kvm_setup_default_irq_routing(kvm);
 	if (r) {
-		kfree(kvm->arch.vioapic);
+		kvm_ioapic_destroy(kvm);
 		goto out;
 	}
 	break;
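
kfree() alone left the vioapic registered on the VM's MMIO bus, so a stale device pointer survived this error path; kvm_ioapic_destroy() unregisters the device before freeing it. Approximately what the helper does (a sketch from kvm's ioapic code; the exact body is an assumption):

	void kvm_ioapic_destroy(struct kvm *kvm)
	{
		struct kvm_ioapic *ioapic = kvm->arch.vioapic;

		kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
		kvm->arch.vioapic = NULL;
		kfree(ioapic);
	}
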
@@ -982,11 +980,13 @@ long kvm_arch_vm_ioctl(struct file *filp,
 		r = -EFAULT;
 		if (copy_from_user(&irq_event, argp, sizeof irq_event))
 			goto out;
+		r = -ENXIO;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 					irq_event.irq, irq_event.level);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
+				r = -EFAULT;
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
 							sizeof irq_event))
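
The two added assignments apply the standard KVM ioctl error idiom: preload r with the failure code before the step that can fail, and overwrite it only on success. KVM_IRQ_LINE on a VM without an in-kernel irqchip now returns -ENXIO instead of silently succeeding, and a failed copy_to_user surfaces as -EFAULT. Condensed (not the literal control flow of this handler):

	r = -ENXIO;
	if (!irqchip_in_kernel(kvm))
		goto out;		/* reject rather than no-op */
	r = -EFAULT;
	if (copy_to_user(argp, &irq_event, sizeof irq_event))
		goto out;
	r = 0;
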
@@ -1377,12 +1377,14 @@ static void free_kvm(struct kvm *kvm)
 
 static void kvm_release_vm_pages(struct kvm *kvm)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i, j;
 	unsigned long base_gfn;
 
-	for (i = 0; i < kvm->nmemslots; i++) {
-		memslot = &kvm->memslots[i];
+	slots = kvm_memslots(kvm);
+	for (i = 0; i < slots->nmemslots; i++) {
+		memslot = &slots->memslots[i];
 		base_gfn = memslot->base_gfn;
 
 		for (j = 0; j < memslot->npages; j++) {
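
The memslot array now lives behind an RCU-managed struct kvm_memslots pointer, so walkers must fetch the current array through kvm_memslots() rather than reading fields off struct kvm directly. The accessor is approximately (the real definition lives in the common headers):

	static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
	{
		/* caller must hold srcu_read_lock(&kvm->srcu) or slots_lock */
		return rcu_dereference(kvm->memslots);
	}
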
@@ -1405,6 +1407,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
 	free_kvm(kvm);
 }
 
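
Every init_srcu_struct() needs a matching cleanup_srcu_struct() before the containing object is freed, or the per-CPU SRCU state leaks; VM teardown is the natural place. Lifecycle pairing, sketched:

	init_srcu_struct(&kvm->srcu);		/* at VM creation */
	/* ... readers and writers as in the hunks above ... */
	cleanup_srcu_struct(&kvm->srcu);	/* here, just before free_kvm() */
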
@@ -1535,8 +1538,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 		goto out;
 
 		if (copy_to_user(user_stack, stack,
-				 sizeof(struct kvm_ia64_vcpu_stack)))
+				 sizeof(struct kvm_ia64_vcpu_stack))) {
+			r = -EFAULT;
 			goto out;
+		}
 
 		break;
 	}
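
Previously a failed copy_to_user() here jumped to out with whatever value r already held (likely 0 from the preceding call), so the ioctl could report success while userspace received a partial stack; the braces plus r = -EFAULT make the fault visible. Same error idiom as in the KVM_IRQ_LINE hunk above.
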
@@ -1576,15 +1581,15 @@ out:
 	return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
+		struct kvm_userspace_memory_region *mem,
 		int user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+	int npages = memslot->npages;
 	unsigned long base_gfn = memslot->base_gfn;
 
 	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1608,6 +1613,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		struct kvm_memory_slot old,
+		int user_alloc)
+{
+	return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
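
kvm_arch_set_memory_region() is split in two because the generic code now publishes memslot updates via RCU: the fallible arch work runs before the new array is visible to readers, and the commit hook runs after publication, where failure is no longer an option. ia64 has no post-publish work, hence the empty stub. The generic sequence that motivates the split, condensed (from the common KVM code of this era; variable names and details elided or assumed):

	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;			/* nothing published yet; easy unwind */

	rcu_assign_pointer(kvm->memslots, slots);	/* publish the new array */
	synchronize_srcu(&kvm->srcu);			/* wait out old readers */

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);
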
@@ -1794,7 +1807,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1802,12 +1816,12 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
 
-	memslot = &kvm->memslots[log->slot];
+	memslot = &kvm->memslots->memslots[log->slot];
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
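
kvm_dirty_bitmap_bytes() centralizes the open-coded ALIGN(npages, BITS_PER_LONG) / 8, and n becomes unsigned long so the size math on very large slots cannot go negative. To the best of my reading the helper is just:

	static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		/* one bit per page, rounded up to whole longs */
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}
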
@@ -1823,10 +1837,11 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	mutex_lock(&kvm->slots_lock);
 	spin_lock(&kvm->arch.dirty_log_lock);
 
 	r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1840,12 +1855,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
-		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		memslot = &kvm->memslots->memslots[log->slot];
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
 out:
+	mutex_unlock(&kvm->slots_lock);
 	spin_unlock(&kvm->arch.dirty_log_lock);
 	return r;
 }
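
Taking slots_lock around the whole get-dirty-log operation pins the memslot array against a concurrent KVM_SET_USER_MEMORY_REGION while the bitmap is synced and cleared; the arch spinlock alone only serialized access to the bitmap itself. Note the nesting order, mutex outside spinlock, which is also the only legal order since a mutex may sleep:

	mutex_lock(&kvm->slots_lock);		/* slots can't change under us */
	spin_lock(&kvm->arch.dirty_log_lock);	/* bitmap access is serialized */
	/* ... sync, test, and clear the dirty bitmap ... */
	spin_unlock(&kvm->arch.dirty_log_lock);
	mutex_unlock(&kvm->slots_lock);
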