Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/assigned-dev.c   |   7
-rw-r--r--  virt/kvm/coalesced_mmio.c |   1
-rw-r--r--  virt/kvm/eventfd.c        |   1
-rw-r--r--  virt/kvm/ioapic.c         |   3
-rw-r--r--  virt/kvm/iommu.c          |  12
-rw-r--r--  virt/kvm/irq_comm.c       |  15
-rw-r--r--  virt/kvm/kvm_main.c       | 106
7 files changed, 103 insertions(+), 42 deletions(-)
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 4d10b1e047f4..7c98928b09d9 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -1,7 +1,7 @@
 /*
  * Kernel-based Virtual Machine - device assignment support
  *
- * Copyright (C) 2006-9 Red Hat, Inc
+ * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
  *
  * This work is licensed under the terms of the GNU GPL, version 2. See
  * the COPYING file in the top-level directory.
@@ -58,12 +58,10 @@ static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
 static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
 {
 	struct kvm_assigned_dev_kernel *assigned_dev;
-	struct kvm *kvm;
 	int i;
 
 	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
 				    interrupt_work);
-	kvm = assigned_dev->kvm;
 
 	spin_lock_irq(&assigned_dev->assigned_dev_lock);
 	if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
@@ -448,9 +446,6 @@ static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
 	struct kvm_assigned_dev_kernel *match;
 	unsigned long host_irq_type, guest_irq_type;
 
-	if (!capable(CAP_SYS_RAWIO))
-		return -EPERM;
-
 	if (!irqchip_in_kernel(kvm))
 		return r;
 
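For context: the two deletions in the work handler above remove a kvm back-pointer that no longer had any users; the handler still recovers its device from the embedded work_struct. A minimal userspace sketch of that container_of() pattern, with simplified stand-in types (not the kernel's definitions):

    #include <stddef.h>

    /* Stand-ins for the kernel types involved (simplified assumptions). */
    struct work_struct { int pending; };

    struct kvm_assigned_dev_kernel {
        int host_irq;
        struct work_struct interrupt_work;  /* embedded member */
    };

    /* container_of(): recover the enclosing object from a member pointer. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void interrupt_work_handler(struct work_struct *work)
    {
        struct kvm_assigned_dev_kernel *dev =
            container_of(work, struct kvm_assigned_dev_kernel,
                         interrupt_work);
        (void)dev;  /* dev is the device that queued this work item */
    }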
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 53850177163f..fc8487564d1f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -2,6 +2,7 @@
  * KVM coalesced MMIO
  *
  * Copyright (c) 2008 Bull S.A.S.
+ * Copyright 2009 Red Hat, Inc. and/or its affiliates.
  *
  * Author: Laurent Vivier <Laurent.Vivier@bull.net>
  *
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index b81f0ebbaaad..66cf65b510b1 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -2,6 +2,7 @@
  * kvm eventfd support - use eventfd objects to signal various KVM events
  *
  * Copyright 2009 Novell. All Rights Reserved.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  * Author:
  *	Gregory Haskins <ghaskins@novell.com>
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index 3500dee9cf2b..0b9df8303dcf 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (C) 2001 MandrakeSoft S.A.
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  *
  *   MandrakeSoft S.A.
  *   43, rue d'Aboukir
@@ -151,7 +152,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
 	update_handled_vectors(ioapic);
 	mask_after = e->fields.mask;
 	if (mask_before != mask_after)
-		kvm_fire_mask_notifiers(ioapic->kvm, index, mask_after);
+		kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
 	if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
 	    && ioapic->irr & (1 << index))
 		ioapic_service(ioapic, index);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 96048ee9e39e..62a9caf0563c 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -16,6 +16,8 @@
  *
  * Copyright (C) 2006-2008 Intel Corporation
  * Copyright IBM Corporation, 2008
+ * Copyright 2010 Red Hat, Inc. and/or its affiliates.
+ *
  * Author: Allen M. Kay <allen.m.kay@intel.com>
  * Author: Weidong Han <weidong.han@intel.com>
  * Author: Ben-Ami Yassour <benami@il.ibm.com>
@@ -106,7 +108,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 			      get_order(page_size), flags);
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
-			       "iommu failed to map pfn=%lx\n", pfn);
+			       "iommu failed to map pfn=%llx\n", pfn);
 			goto unmap_pages;
 		}
 
@@ -124,9 +126,10 @@ unmap_pages:
 
 static int kvm_iommu_map_memslots(struct kvm *kvm)
 {
-	int i, r = 0;
+	int i, idx, r = 0;
 	struct kvm_memslots *slots;
 
+	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
@@ -134,6 +137,7 @@ static int kvm_iommu_map_memslots(struct kvm *kvm)
 		if (r)
 			break;
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 
 	return r;
 }
@@ -283,15 +287,17 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 
 static int kvm_iommu_unmap_memslots(struct kvm *kvm)
 {
-	int i;
+	int i, idx;
 	struct kvm_memslots *slots;
 
+	idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
 
 	for (i = 0; i < slots->nmemslots; i++) {
 		kvm_iommu_put_pages(kvm, slots->memslots[i].base_gfn,
 				    slots->memslots[i].npages);
 	}
+	srcu_read_unlock(&kvm->srcu, idx);
 
 	return 0;
 }
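Both memslot walks above gain an SRCU read-side critical section, so the array returned by kvm_memslots() can be dereferenced safely while a concurrent memory-region update swaps it out. The general shape of the pattern (kernel-style sketch, not buildable standalone):

    int idx;
    struct kvm_memslots *slots;

    idx = srcu_read_lock(&kvm->srcu);   /* begin read-side section */
    slots = kvm_memslots(kvm);          /* safe to dereference here */
    /* ... walk slots->memslots[0..nmemslots) ... */
    srcu_read_unlock(&kvm->srcu, idx);  /* must pass back the same idx */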
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a0e88809e45e..369e38010ad5 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -17,6 +17,7 @@
  * Authors:
  *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
  *
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  */
 
 #include <linux/kvm_host.h>
@@ -99,7 +100,7 @@ int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
 			if (r < 0)
 				r = 0;
 			r += kvm_apic_set_irq(vcpu, irq);
-		} else {
+		} else if (kvm_lapic_enabled(vcpu)) {
 			if (!lowest)
 				lowest = vcpu;
 			else if (kvm_apic_compare_prio(vcpu, lowest) < 0)
@@ -278,15 +279,19 @@ void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
 	synchronize_rcu();
 }
 
-void kvm_fire_mask_notifiers(struct kvm *kvm, int irq, bool mask)
+void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
+			     bool mask)
 {
 	struct kvm_irq_mask_notifier *kimn;
 	struct hlist_node *n;
+	int gsi;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
-		if (kimn->irq == irq)
-			kimn->func(kimn, mask);
+	gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
+	if (gsi != -1)
+		hlist_for_each_entry_rcu(kimn, n, &kvm->mask_notifier_list, link)
+			if (kimn->irq == gsi)
+				kimn->func(kimn, mask);
 	rcu_read_unlock();
 }
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f032806a212f..b78b794c1039 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -5,6 +5,7 @@
  * machines without emulation or binary translation.
  *
  * Copyright (C) 2006 Qumranet, Inc.
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  *
  * Authors:
  *   Avi Kivity <avi@qumranet.com>
@@ -92,6 +93,12 @@ static bool kvm_rebooting;
 
 static bool largepages_enabled = true;
 
+static struct page *hwpoison_page;
+static pfn_t hwpoison_pfn;
+
+static struct page *fault_page;
+static pfn_t fault_pfn;
+
 inline int kvm_is_mmio_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn)) {
@@ -141,7 +148,7 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 	raw_spin_lock(&kvm->requests_lock);
 	me = smp_processor_id();
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_and_set_bit(req, &vcpu->requests))
+		if (kvm_make_check_request(req, vcpu))
 			continue;
 		cpu = vcpu->cpu;
 		if (cpus != NULL && cpu != -1 && cpu != me)
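kvm_make_check_request() replaces the open-coded bit operation; given how it is used here ("continue" when it returns true), it presumably keeps test_and_set_bit() semantics. The helpers come from a companion kvm_host.h change not included in this diff; their presumed shape:

    static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
    {
        set_bit(req, &vcpu->requests);
    }

    /* Sets the request and reports whether it was already pending. */
    static inline bool kvm_make_check_request(int req, struct kvm_vcpu *vcpu)
    {
        return test_and_set_bit(req, &vcpu->requests);
    }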
@@ -566,6 +573,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 
 	new = old = *memslot;
 
+	new.id = mem->slot;
 	new.base_gfn = base_gfn;
 	new.npages = npages;
 	new.flags = mem->flags;
@@ -596,7 +604,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	/* Allocate if a slot is being created */
 #ifndef CONFIG_S390
 	if (npages && !new.rmap) {
-		new.rmap = vmalloc(npages * sizeof(struct page *));
+		new.rmap = vmalloc(npages * sizeof(*new.rmap));
 
 		if (!new.rmap)
 			goto out_free;
@@ -621,9 +629,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		if (new.lpage_info[i])
 			continue;
 
-		lpages = 1 + (base_gfn + npages - 1) /
-			     KVM_PAGES_PER_HPAGE(level);
-		lpages -= base_gfn / KVM_PAGES_PER_HPAGE(level);
+		lpages = 1 + ((base_gfn + npages - 1)
+			     >> KVM_HPAGE_GFN_SHIFT(level));
+		lpages -= base_gfn >> KVM_HPAGE_GFN_SHIFT(level);
 
 		new.lpage_info[i] = vmalloc(lpages * sizeof(*new.lpage_info[i]));
 
@@ -633,9 +641,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
 		memset(new.lpage_info[i], 0,
 		       lpages * sizeof(*new.lpage_info[i]));
 
-		if (base_gfn % KVM_PAGES_PER_HPAGE(level))
+		if (base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][0].write_count = 1;
-		if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE(level))
+		if ((base_gfn+npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
 			new.lpage_info[i][lpages - 1].write_count = 1;
 		ugfn = new.userspace_addr >> PAGE_SHIFT;
 		/*
@@ -810,16 +818,28 @@ EXPORT_SYMBOL_GPL(kvm_disable_largepages);
 
 int is_error_page(struct page *page)
 {
-	return page == bad_page;
+	return page == bad_page || page == hwpoison_page || page == fault_page;
 }
 EXPORT_SYMBOL_GPL(is_error_page);
 
 int is_error_pfn(pfn_t pfn)
 {
-	return pfn == bad_pfn;
+	return pfn == bad_pfn || pfn == hwpoison_pfn || pfn == fault_pfn;
 }
 EXPORT_SYMBOL_GPL(is_error_pfn);
 
+int is_hwpoison_pfn(pfn_t pfn)
+{
+	return pfn == hwpoison_pfn;
+}
+EXPORT_SYMBOL_GPL(is_hwpoison_pfn);
+
+int is_fault_pfn(pfn_t pfn)
+{
+	return pfn == fault_pfn;
+}
+EXPORT_SYMBOL_GPL(is_fault_pfn);
+
 static inline unsigned long bad_hva(void)
 {
 	return PAGE_OFFSET;
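The two new predicates let callers distinguish failure modes instead of treating every sentinel page alike. A sketch of a hypothetical call site:

    pfn = gfn_to_pfn(kvm, gfn);
    if (is_hwpoison_pfn(pfn)) {
        /* backing page is hardware-poisoned: forward an MCE to the guest */
    } else if (is_fault_pfn(pfn)) {
        /* translation failed: report a generic access error */
    } else if (is_error_pfn(pfn)) {
        /* any remaining sentinel (bad_pfn) */
    }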
@@ -831,7 +851,7 @@ int kvm_is_error_hva(unsigned long addr)
 }
 EXPORT_SYMBOL_GPL(kvm_is_error_hva);
 
-struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
+struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
@@ -845,20 +865,13 @@ struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
 	}
 	return NULL;
 }
-EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
-
-struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
-{
-	gfn = unalias_gfn(kvm, gfn);
-	return gfn_to_memslot_unaliased(kvm, gfn);
-}
+EXPORT_SYMBOL_GPL(gfn_to_memslot);
 
 int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 {
 	int i;
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
@@ -903,7 +916,6 @@ int memslot_id(struct kvm *kvm, gfn_t gfn)
 	struct kvm_memslots *slots = kvm_memslots(kvm);
 	struct kvm_memory_slot *memslot = NULL;
 
-	gfn = unalias_gfn(kvm, gfn);
 	for (i = 0; i < slots->nmemslots; ++i) {
 		memslot = &slots->memslots[i];
 
@@ -924,8 +936,7 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
 
-	gfn = unalias_gfn_instantiation(kvm, gfn);
-	slot = gfn_to_memslot_unaliased(kvm, gfn);
+	slot = gfn_to_memslot(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
 	return gfn_to_hva_memslot(slot, gfn);
@@ -946,13 +957,19 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr)
 	struct vm_area_struct *vma;
 
 	down_read(&current->mm->mmap_sem);
+	if (is_hwpoison_address(addr)) {
+		up_read(&current->mm->mmap_sem);
+		get_page(hwpoison_page);
+		return page_to_pfn(hwpoison_page);
+	}
+
 	vma = find_vma(current->mm, addr);
 
 	if (vma == NULL || addr < vma->vm_start ||
 	    !(vma->vm_flags & VM_PFNMAP)) {
 		up_read(&current->mm->mmap_sem);
-		get_page(bad_page);
-		return page_to_pfn(bad_page);
+		get_page(fault_page);
+		return page_to_pfn(fault_page);
 	}
 
 	pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
@@ -1187,8 +1204,7 @@ void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
 {
 	struct kvm_memory_slot *memslot;
 
-	gfn = unalias_gfn(kvm, gfn);
-	memslot = gfn_to_memslot_unaliased(kvm, gfn);
+	memslot = gfn_to_memslot(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
 
@@ -1207,7 +1223,7 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
 		if (kvm_arch_vcpu_runnable(vcpu)) {
-			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			kvm_make_request(KVM_REQ_UNHALT, vcpu);
 			break;
 		}
 		if (kvm_cpu_has_pending_timer(vcpu))
@@ -1378,6 +1394,18 @@ static long kvm_vcpu_ioctl(struct file *filp,
 
 	if (vcpu->kvm->mm != current->mm)
 		return -EIO;
+
+#if defined(CONFIG_S390) || defined(CONFIG_PPC)
+	/*
+	 * Special cases: vcpu ioctls that are asynchronous to vcpu execution,
+	 * so vcpu_load() would break it.
+	 */
+	if (ioctl == KVM_S390_INTERRUPT || ioctl == KVM_INTERRUPT)
+		return kvm_arch_vcpu_ioctl(filp, ioctl, arg);
+#endif
+
+
+	vcpu_load(vcpu);
 	switch (ioctl) {
 	case KVM_RUN:
 		r = -EINVAL;
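This hunk moves vcpu_load() out of the individual ioctl handlers and into the dispatcher, with the matching vcpu_put() on the common exit path (see the "out:" hunk below). A condensed sketch of the resulting shape:

    static long kvm_vcpu_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg)
    {
        struct kvm_vcpu *vcpu = filp->private_data;
        long r;

        vcpu_load(vcpu);        /* every case below runs "loaded" */
        switch (ioctl) {
        case KVM_RUN:
            r = -EINVAL;
            if (arg)
                goto out;
            r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
            break;
        default:
            r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
        }
    out:
        vcpu_put(vcpu);         /* single paired release */
        return r;
    }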
@@ -1520,7 +1548,7 @@ out_free2:
 				goto out;
 			p = &sigset;
 		}
-		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
 		break;
 	}
 	case KVM_GET_FPU: {
@@ -1555,6 +1583,7 @@ out_free2:
 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
 out:
+	vcpu_put(vcpu);
 	kfree(fpu);
 	kfree(kvm_sregs);
 	return r;
@@ -2197,6 +2226,24 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
 
 	bad_pfn = page_to_pfn(bad_page);
 
+	hwpoison_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	if (hwpoison_page == NULL) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
+	hwpoison_pfn = page_to_pfn(hwpoison_page);
+
+	fault_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+
+	if (fault_page == NULL) {
+		r = -ENOMEM;
+		goto out_free_0;
+	}
+
+	fault_pfn = page_to_pfn(fault_page);
+
 	if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
 		r = -ENOMEM;
 		goto out_free_0;
	}
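Both new allocations share the existing out_free_0 label; the NULL checks added to that label (next hunk) are what make the shared unwind safe when the second allocation fails with the first already done. The idiom in miniature (generic sketch, not this file's code):

    a = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (!a) {
        r = -ENOMEM;
        goto out;
    }
    b = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (!b) {
        r = -ENOMEM;
        goto out;       /* b is NULL here; a must still be freed */
    }
    return 0;
    out:
        if (b)
            __free_page(b);
        if (a)
            __free_page(a);
        return r;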
@@ -2269,6 +2316,10 @@ out_free_1:
 out_free_0a:
 	free_cpumask_var(cpus_hardware_enabled);
 out_free_0:
+	if (fault_page)
+		__free_page(fault_page);
+	if (hwpoison_page)
+		__free_page(hwpoison_page);
 	__free_page(bad_page);
 out:
 	kvm_arch_exit();
@@ -2290,6 +2341,7 @@ void kvm_exit(void)
 	kvm_arch_hardware_unsetup();
 	kvm_arch_exit();
 	free_cpumask_var(cpus_hardware_enabled);
+	__free_page(hwpoison_page);
 	__free_page(bad_page);
 }
 EXPORT_SYMBOL_GPL(kvm_exit);