author	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit	ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree	644b88f8a71896307d71438e9b3af49126ffb22b /arch/ia64/kvm
parent	43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent	3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--	arch/ia64/kvm/Kconfig	|  2
-rw-r--r--	arch/ia64/kvm/Makefile	|  2
-rw-r--r--	arch/ia64/kvm/asm-offsets.c	|  1
-rw-r--r--	arch/ia64/kvm/kvm-ia64.c	| 80
-rw-r--r--	arch/ia64/kvm/kvm_fw.c	| 28
-rw-r--r--	arch/ia64/kvm/mmio.c	|  4
-rw-r--r--	arch/ia64/kvm/vcpu.c	|  4
-rw-r--r--	arch/ia64/kvm/vcpu.h	|  9
-rw-r--r--	arch/ia64/kvm/vmm.c	|  4
-rw-r--r--	arch/ia64/kvm/vtlb.c	|  2
10 files changed, 73 insertions, 63 deletions
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index ef3e7be29caf..fa4d1e59deb0 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
 	select ANON_INODES
 	select HAVE_KVM_IRQCHIP
 	select KVM_APIC_ARCHITECTURE
+	select KVM_MMIO
 	---help---
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions.  You will need a fairly recent
@@ -47,6 +48,7 @@ config KVM_INTEL
 	  Provides support for KVM on Itanium 2 processors equipped with the VT
 	  extensions.
 
+source drivers/vhost/Kconfig
 source drivers/virtio/Kconfig
 
 endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 0bb99b732908..1089b3e918ac 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
 
 common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
-		coalesced_mmio.o irq_comm.o)
+		coalesced_mmio.o irq_comm.o assigned-dev.o)
 
 ifeq ($(CONFIG_IOMMU_API),y)
 common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 0c3564a7a033..9324c875caf5 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -22,7 +22,6 @@
  *
  */
 
-#include <linux/autoconf.h>
 #include <linux/kvm_host.h>
 #include <linux/kbuild.h>
 
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -23,8 +23,8 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/percpu.h>
-#include <linux/gfp.h>
 #include <linux/fs.h>
+#include <linux/slab.h>
 #include <linux/smp.h>
 #include <linux/kvm_host.h>
 #include <linux/kvm.h>
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
 
 static DEFINE_SPINLOCK(vp_lock);
 
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
 {
 	long status;
 	long tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
 	slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
 	local_irq_restore(saved_psr);
 	if (slot < 0)
-		return;
+		return -EINVAL;
 
 	spin_lock(&vp_lock);
 	status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
 			__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
 	if (status != 0) {
 		printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
-		return ;
+		return -EINVAL;
 	}
 
 	if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
 	}
 	spin_unlock(&vp_lock);
 	ia64_ptr_entry(0x3, slot);
+
+	return 0;
 }
 
 void kvm_arch_hardware_disable(void *garbage)
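
The hunks above convert kvm_arch_hardware_enable() from void to int so that a failure to map the VMM region or to enable VT is reported to the caller instead of being silently swallowed. A minimal sketch of the resulting control flow (simplified; map_vmm_region() and init_vt_env() are hypothetical stand-ins for the ia64_itr_entry() and ia64_pal_vp_init_env() calls shown in the diff):

	/* Sketch only: the shape of the converted hook, not the full function. */
	int kvm_arch_hardware_enable(void *garbage)
	{
		int slot = map_vmm_region();	/* hypothetical: wraps ia64_itr_entry() */

		if (slot < 0)
			return -EINVAL;		/* previously a bare "return;" */

		if (init_vt_env() != 0)		/* hypothetical: wraps ia64_pal_vp_init_env() */
			return -EINVAL;

		return 0;			/* generic code can now catch per-CPU failures */
	}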
@@ -239,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 0;
 mmio:
 	if (p->dir)
-		r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				    p->size, &p->data);
 	else
-		r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr,
+		r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
 				     p->size, &p->data);
 	if (r)
 		printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
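
kvm_io_bus_read()/kvm_io_bus_write() now take the struct kvm pointer plus a bus index (KVM_MMIO_BUS) instead of a raw pointer into struct kvm, so the implementation can look the bus up under RCU while memslots and buses are updated concurrently. A rough sketch of why the signature changed, loosely based on virt/kvm/kvm_main.c of this period (details may differ):

	/* Hedged sketch: the bus is fetched from an RCU-published array,
	 * so callers can no longer hold a long-lived pointer to it. */
	int kvm_io_bus_write(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			     int len, const void *val)
	{
		struct kvm_io_bus *bus = rcu_dereference(kvm->buses[bus_idx]);
		int i;

		for (i = 0; i < bus->dev_count; i++)
			if (!kvm_iodevice_write(bus->devs[i], addr, len, val))
				return 0;	/* a device claimed the access */
		return -EOPNOTSUPP;		/* nothing mapped at this address */
	}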
@@ -634,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
 static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	union context *host_ctx, *guest_ctx;
-	int r;
+	int r, idx;
 
-	/*
-	 * down_read() may sleep and return with interrupts enabled
-	 */
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 again:
 	if (signal_pending(current)) {
@@ -661,7 +660,7 @@ again:
 	if (r < 0)
 		goto vcpu_run_fail;
 
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	kvm_guest_enter();
 
 	/*
@@ -685,7 +684,7 @@ again:
 	kvm_guest_exit();
 	preempt_enable();
 
-	down_read(&vcpu->kvm->slots_lock);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
 	r = kvm_handle_exit(kvm_run, vcpu);
 
@@ -695,10 +694,10 @@ again:
 	}
 
 out:
-	up_read(&vcpu->kvm->slots_lock);
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	if (r > 0) {
 		kvm_resched(vcpu);
-		down_read(&vcpu->kvm->slots_lock);
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
 		goto again;
 	}
 
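
The four hunks above replace the sleeping slots_lock reader with SRCU. The pattern is mechanical: srcu_read_lock() returns an index that must be handed back to the matching srcu_read_unlock(), the read section is dropped around guest entry, and it is re-entered before memslots are touched again. In isolation:

	/* SRCU read-side pattern used in __vcpu_run() above. */
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	/* ... safe to dereference vcpu->kvm->memslots here ... */
	srcu_read_unlock(&vcpu->kvm->srcu, idx);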
@@ -851,8 +850,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -868,9 +866,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
 	r = 0;
 	switch (chip->chip_id) {
 	case KVM_IRQCHIP_IOAPIC:
-		memcpy(ioapic_irqchip(kvm),
-				&chip->chip.ioapic,
-				sizeof(struct kvm_ioapic_state));
+		r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
 		break;
 	default:
 		r = -EINVAL;
@@ -944,7 +940,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 {
 	struct kvm *kvm = filp->private_data;
 	void __user *argp = (void __user *)arg;
-	int r = -EINVAL;
+	int r = -ENOTTY;
 
 	switch (ioctl) {
 	case KVM_SET_MEMORY_REGION: {
@@ -972,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		r = kvm_setup_default_irq_routing(kvm);
 		if (r) {
-			kfree(kvm->arch.vioapic);
+			kvm_ioapic_destroy(kvm);
 			goto out;
 		}
 		break;
@@ -985,10 +981,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
 			goto out;
 		if (irqchip_in_kernel(kvm)) {
 			__s32 status;
-			mutex_lock(&kvm->irq_lock);
 			status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
 				    irq_event.irq, irq_event.level);
-			mutex_unlock(&kvm->irq_lock);
 			if (ioctl == KVM_IRQ_LINE_STATUS) {
 				irq_event.status = status;
 				if (copy_to_user(argp, &irq_event,
@@ -1380,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
 
 static void kvm_release_vm_pages(struct kvm *kvm)
 {
+	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
 	int i, j;
 	unsigned long base_gfn;
 
-	for (i = 0; i < kvm->nmemslots; i++) {
-		memslot = &kvm->memslots[i];
+	slots = rcu_dereference(kvm->memslots);
+	for (i = 0; i < slots->nmemslots; i++) {
+		memslot = &slots->memslots[i];
 		base_gfn = memslot->base_gfn;
 
 		for (j = 0; j < memslot->npages; j++) {
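
With the memslot array now published through an RCU pointer, kvm_release_vm_pages() dereferences kvm->memslots once and walks that snapshot. The loop shape, standalone:

	/* Iterating an RCU-published memslot array (VM teardown context here,
	 * so no concurrent update is expected). */
	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
	int i;

	for (i = 0; i < slots->nmemslots; i++) {
		struct kvm_memory_slot *memslot = &slots->memslots[i];
		/* ... release memslot->npages pages from memslot->base_gfn ... */
	}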
@@ -1408,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
 	kfree(kvm->arch.vioapic);
 	kvm_release_vm_pages(kvm);
 	kvm_free_physmem(kvm);
+	cleanup_srcu_struct(&kvm->srcu);
 	free_kvm(kvm);
 }
 
@@ -1579,15 +1576,15 @@ out:
 	return r;
 }
 
-int kvm_arch_set_memory_region(struct kvm *kvm,
-		struct kvm_userspace_memory_region *mem,
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+		struct kvm_memory_slot *memslot,
 		struct kvm_memory_slot old,
+		struct kvm_userspace_memory_region *mem,
 		int user_alloc)
 {
 	unsigned long i;
 	unsigned long pfn;
-	int npages = mem->memory_size >> PAGE_SHIFT;
-	struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
+	int npages = memslot->npages;
 	unsigned long base_gfn = memslot->base_gfn;
 
 	if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1611,6 +1608,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	return 0;
 }
 
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+		struct kvm_userspace_memory_region *mem,
+		struct kvm_memory_slot old,
+		int user_alloc)
+{
+	return;
+}
+
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 	kvm_flush_remote_tlbs(kvm);
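
kvm_arch_set_memory_region() is split into a prepare/commit pair: prepare runs before the new slot array is published and may fail; commit runs after publication and must not. Seen from the generic code, the sequence is roughly (a hedged sketch, not the verbatim caller):

	/* Two-phase memslot update as driven by generic KVM code (sketch). */
	r = kvm_arch_prepare_memory_region(kvm, &new, old, mem, user_alloc);
	if (r)
		goto out_free;		/* nothing published yet; safe to abort */

	/* ... swap in the new RCU-published slot array and synchronize ... */

	kvm_arch_commit_memory_region(kvm, mem, old, user_alloc);	/* no return value */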
@@ -1797,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
 	struct kvm_memory_slot *memslot;
 	int r, i;
-	long n, base;
+	long base;
+	unsigned long n;
 	unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
 			offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
 
@@ -1805,12 +1811,12 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 	if (log->slot >= KVM_MEMORY_SLOTS)
 		goto out;
 
-	memslot = &kvm->memslots[log->slot];
+	memslot = &kvm->memslots->memslots[log->slot];
 	r = -ENOENT;
 	if (!memslot->dirty_bitmap)
 		goto out;
 
-	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	n = kvm_dirty_bitmap_bytes(memslot);
 	base = memslot->base_gfn / BITS_PER_LONG;
 
 	for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,10 +1832,11 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		struct kvm_dirty_log *log)
 {
 	int r;
-	int n;
+	unsigned long n;
 	struct kvm_memory_slot *memslot;
 	int is_dirty = 0;
 
+	mutex_lock(&kvm->slots_lock);
 	spin_lock(&kvm->arch.dirty_log_lock);
 
 	r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1843,12 +1850,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
 		kvm_flush_remote_tlbs(kvm);
-		memslot = &kvm->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+		memslot = &kvm->memslots->memslots[log->slot];
+		n = kvm_dirty_bitmap_bytes(memslot);
 		memset(memslot->dirty_bitmap, 0, n);
 	}
 	r = 0;
 out:
+	mutex_unlock(&kvm->slots_lock);
 	spin_unlock(&kvm->arch.dirty_log_lock);
 	return r;
 }
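
The dirty-log hunks replace the open-coded ALIGN(npages, BITS_PER_LONG) / 8 sizing with kvm_dirty_bitmap_bytes(), so every user of the bitmap agrees on its length, and they widen n to unsigned long to avoid truncation for large slots. The helper is essentially (paraphrased from the generic header of this period):

	/* One authoritative formula for the dirty bitmap's size in bytes. */
	static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
	{
		return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
	}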
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index e4b82319881d..cb548ee9fcae 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu,
 	struct exit_ctl_data *p;
 
 	p = kvm_get_exit_data(vcpu);
-	if (p && p->exit_reason == EXIT_REASON_PAL_CALL) {
+	if (p->exit_reason == EXIT_REASON_PAL_CALL) {
 		p->u.pal_data.ret = result;
 		return ;
 	}
@@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu,
 	struct exit_ctl_data *p;
 
 	p = kvm_get_exit_data(vcpu);
-	if (p && p->exit_reason == EXIT_REASON_SAL_CALL) {
+	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
 		p->u.sal_data.ret = result;
 		return ;
 	}
@@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
 	struct exit_ctl_data *p;
 
 	p = kvm_get_exit_data(vcpu);
-	if (p && (p->exit_reason == EXIT_REASON_PAL_CALL))
+	if (p->exit_reason == EXIT_REASON_PAL_CALL)
 		index = p->u.pal_data.gr28;
 
 	return index;
@@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
 
 	p = kvm_get_exit_data(vcpu);
 
-	if (p) {
-		if (p->exit_reason == EXIT_REASON_SAL_CALL) {
-			*in0 = p->u.sal_data.in0;
-			*in1 = p->u.sal_data.in1;
-			*in2 = p->u.sal_data.in2;
-			*in3 = p->u.sal_data.in3;
-			*in4 = p->u.sal_data.in4;
-			*in5 = p->u.sal_data.in5;
-			*in6 = p->u.sal_data.in6;
-			*in7 = p->u.sal_data.in7;
-			return ;
-		}
+	if (p->exit_reason == EXIT_REASON_SAL_CALL) {
+		*in0 = p->u.sal_data.in0;
+		*in1 = p->u.sal_data.in1;
+		*in2 = p->u.sal_data.in2;
+		*in3 = p->u.sal_data.in3;
+		*in4 = p->u.sal_data.in4;
+		*in5 = p->u.sal_data.in5;
+		*in6 = p->u.sal_data.in6;
+		*in7 = p->u.sal_data.in7;
+		return ;
 	}
 	*in0 = 0;
 }
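
All four kvm_fw.c hunks drop `p != NULL` tests because kvm_get_exit_data() returns a pointer into the VCPU's own exit-control area and can never be NULL; the checks were dead code, and removing the outer `if (p)` in kvm_get_sal_call_data() also unindents the body one level. For reference, the accessor amounts to something like this (hedged sketch of the in-tree helper):

	/* The returned pointer aliases per-VCPU state, hence never NULL. */
	static struct exit_ctl_data *kvm_get_exit_data(struct kvm_vcpu *vcpu)
	{
		return &vcpu->arch.exit_data;
	}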
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 9bf55afd08d0..fb8f9f59a1ed 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
 		return;
 	} else {
 		inst_type = -1;
-		panic_vm(vcpu, "Unsupported MMIO access instruction! \
-				Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
+		panic_vm(vcpu, "Unsupported MMIO access instruction! "
+			 "Bunld[0]=0x%lx, Bundle[1]=0x%lx\n",
 			 bundle.i64[0], bundle.i64[1]);
 	}
 
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index dce75b70cdd5..958815c9787d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
 	 * Otherwise panic
 	 */
 	if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
-		panic_vm(vcpu, "Only support guests with vpsr.pk =0 \
-			& vpsr.is=0\n");
+		panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
+			 "& vpsr.is=0\n");
 
 	/*
 	 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
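
The mmio.c and vcpu.c hunks fix the same defect: a backslash line continuation inside a string literal embeds the next line's leading whitespace into the message, whereas adjacent string literals concatenate cleanly at compile time. The difference, distilled:

	/* Continuation inside the literal: the message contains a run of tabs. */
	printk("broken \
			message %lx\n", val);

	/* Adjacent literals: concatenated by the compiler, no stray whitespace. */
	printk("clean "
	       "message %lx\n", val);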
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae69..988911b4cc7a 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 #define _vmm_raw_spin_lock(x)	 do {}while(0)
 #define _vmm_raw_spin_unlock(x) do {}while(0)
 #else
+typedef struct {
+	volatile unsigned int lock;
+} vmm_spinlock_t;
 #define _vmm_raw_spin_lock(x)						\
 	do {								\
 		__u32 *ia64_spinlock_ptr = (__u32 *) (x);		\
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
 
 #define _vmm_raw_spin_unlock(x)				\
 	do { barrier();					\
-		((spinlock_t *)x)->raw_lock.lock = 0; }	\
+		((vmm_spinlock_t *)x)->lock = 0; }	\
 while (0)
 #endif
 
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
 enum {
 	I_TLB = 1,
 	D_TLB = 2
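
vcpu.h gains a dedicated vmm_spinlock_t because the VMM module runs in its own isolated mapping, where casting to the kernel's spinlock_t (whose layout varies with lockdep and debug options) is fragile; a bare volatile word is exactly what the hand-rolled lock/unlock code touches. The vmm.c and vtlb.c hunks below just propagate the new type. Hypothetical usage, mirroring the kernel API:

	/* Zero-initialized means unlocked, as with the raw lock it replaces. */
	static vmm_spinlock_t dirty_log_lock = { .lock = 0 };

	static void mark_dirty_locked(void)
	{
		vmm_spin_lock(&dirty_log_lock);
		/* ... update the shared dirty-log bitmap ... */
		vmm_spin_unlock(&dirty_log_lock);
	}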
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6c..7a62f75778c5 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
 	return ;
 }
 
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_lock(lock);
 }
 
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
 {
 	_vmm_raw_spin_unlock(lock);
 }
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6e..4332f7ee5203 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
 {
 	u64 i, dirty_pages = 1;
 	u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
-	spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+	vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
 	void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
 
 	dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;