Diffstat (limited to 'virt/kvm/kvm_main.c')

 virt/kvm/kvm_main.c | 382 +++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 337 insertions(+), 45 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7dd9b0b85e4e..cf0ab8ed3845 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -51,6 +51,12 @@
 #include "coalesced_mmio.h"
 #endif
 
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include "irq.h"
+#endif
+
 MODULE_AUTHOR("Qumranet");
 MODULE_LICENSE("GPL");
 
@@ -71,11 +77,253 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
 
 bool kvm_rebooting;
 
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
+						      int assigned_dev_id)
+{
+	struct list_head *ptr;
+	struct kvm_assigned_dev_kernel *match;
+
+	list_for_each(ptr, head) {
+		match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+		if (match->assigned_dev_id == assigned_dev_id)
+			return match;
+	}
+	return NULL;
+}
+
+static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev;
+
+	assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
+				    interrupt_work);
+
+	/* This is taken to safely inject irq inside the guest. When
+	 * the interrupt injection (or the ioapic code) uses a
+	 * finer-grained lock, update this
+	 */
+	mutex_lock(&assigned_dev->kvm->lock);
+	kvm_set_irq(assigned_dev->kvm,
+		    assigned_dev->guest_irq, 1);
+	mutex_unlock(&assigned_dev->kvm->lock);
+	kvm_put_kvm(assigned_dev->kvm);
+}
+
+/* FIXME: Implement the OR logic needed to make shared interrupts on
+ * this line behave properly
+ */
+static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
+{
+	struct kvm_assigned_dev_kernel *assigned_dev =
+		(struct kvm_assigned_dev_kernel *) dev_id;
+
+	kvm_get_kvm(assigned_dev->kvm);
+	schedule_work(&assigned_dev->interrupt_work);
+	disable_irq_nosync(irq);
+	return IRQ_HANDLED;
+}
+
+/* Ack the irq line for an assigned device */
+static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
+{
+	struct kvm_assigned_dev_kernel *dev;
+
+	if (kian->gsi == -1)
+		return;
+
+	dev = container_of(kian, struct kvm_assigned_dev_kernel,
+			   ack_notifier);
+	kvm_set_irq(dev->kvm, dev->guest_irq, 0);
+	enable_irq(dev->host_irq);
+}
+
+static void kvm_free_assigned_device(struct kvm *kvm,
+				     struct kvm_assigned_dev_kernel
+				     *assigned_dev)
+{
+	if (irqchip_in_kernel(kvm) && assigned_dev->irq_requested)
+		free_irq(assigned_dev->host_irq, (void *)assigned_dev);
+
+	kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
+
+	if (cancel_work_sync(&assigned_dev->interrupt_work))
+		/* We had pending work. That means we will have to take
+		 * care of kvm_put_kvm.
+		 */
+		kvm_put_kvm(kvm);
+
+	pci_release_regions(assigned_dev->dev);
+	pci_disable_device(assigned_dev->dev);
+	pci_dev_put(assigned_dev->dev);
+
+	list_del(&assigned_dev->list);
+	kfree(assigned_dev);
+}
+
+void kvm_free_all_assigned_devices(struct kvm *kvm)
+{
+	struct list_head *ptr, *ptr2;
+	struct kvm_assigned_dev_kernel *assigned_dev;
+
+	list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
+		assigned_dev = list_entry(ptr,
+					  struct kvm_assigned_dev_kernel,
+					  list);
+
+		kvm_free_assigned_device(kvm, assigned_dev);
+	}
+}
+
+static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
+				   struct kvm_assigned_irq
+				   *assigned_irq)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_irq->assigned_dev_id);
+	if (!match) {
+		mutex_unlock(&kvm->lock);
+		return -EINVAL;
+	}
+
+	if (match->irq_requested) {
+		match->guest_irq = assigned_irq->guest_irq;
+		match->ack_notifier.gsi = assigned_irq->guest_irq;
+		mutex_unlock(&kvm->lock);
+		return 0;
+	}
+
+	INIT_WORK(&match->interrupt_work,
+		  kvm_assigned_dev_interrupt_work_handler);
+
+	if (irqchip_in_kernel(kvm)) {
+		if (!capable(CAP_SYS_RAWIO)) {
+			r = -EPERM;
+			goto out_release;
+		}
+
+		if (assigned_irq->host_irq)
+			match->host_irq = assigned_irq->host_irq;
+		else
+			match->host_irq = match->dev->irq;
+		match->guest_irq = assigned_irq->guest_irq;
+		match->ack_notifier.gsi = assigned_irq->guest_irq;
+		match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
+		kvm_register_irq_ack_notifier(kvm, &match->ack_notifier);
+
+		/* Even though this is PCI, we don't want to use shared
+		 * interrupts. Sharing host devices with guest-assigned devices
+		 * on the same interrupt line is not a happy situation: there
+		 * are going to be long delays in accepting, acking, etc.
+		 */
+		if (request_irq(match->host_irq, kvm_assigned_dev_intr, 0,
+				"kvm_assigned_device", (void *)match)) {
+			r = -EIO;
+			goto out_release;
+		}
+	}
+
+	match->irq_requested = true;
+	mutex_unlock(&kvm->lock);
+	return r;
+out_release:
+	mutex_unlock(&kvm->lock);
+	kvm_free_assigned_device(kvm, match);
+	return r;
+}
+
+static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
+				      struct kvm_assigned_pci_dev *assigned_dev)
+{
+	int r = 0;
+	struct kvm_assigned_dev_kernel *match;
+	struct pci_dev *dev;
+
+	mutex_lock(&kvm->lock);
+
+	match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
+				      assigned_dev->assigned_dev_id);
+	if (match) {
+		/* device already assigned */
+		r = -EINVAL;
+		goto out;
+	}
+
+	match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
+	if (match == NULL) {
+		printk(KERN_INFO "%s: Couldn't allocate memory\n",
+		       __func__);
+		r = -ENOMEM;
+		goto out;
+	}
+	dev = pci_get_bus_and_slot(assigned_dev->busnr,
+				   assigned_dev->devfn);
+	if (!dev) {
+		printk(KERN_INFO "%s: host device not found\n", __func__);
+		r = -EINVAL;
+		goto out_free;
+	}
+	if (pci_enable_device(dev)) {
+		printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
+		r = -EBUSY;
+		goto out_put;
+	}
+	r = pci_request_regions(dev, "kvm_assigned_device");
+	if (r) {
+		printk(KERN_INFO "%s: Could not get access to device regions\n",
+		       __func__);
+		goto out_disable;
+	}
+	match->assigned_dev_id = assigned_dev->assigned_dev_id;
+	match->host_busnr = assigned_dev->busnr;
+	match->host_devfn = assigned_dev->devfn;
+	match->dev = dev;
+
+	match->kvm = kvm;
+
+	list_add(&match->list, &kvm->arch.assigned_dev_head);
+
+	if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
+		r = kvm_iommu_map_guest(kvm, match);
+		if (r)
+			goto out_list_del;
+	}
+
+out:
+	mutex_unlock(&kvm->lock);
+	return r;
+out_list_del:
+	list_del(&match->list);
+	pci_release_regions(dev);
+out_disable:
+	pci_disable_device(dev);
+out_put:
+	pci_dev_put(dev);
+out_free:
+	kfree(match);
+	mutex_unlock(&kvm->lock);
+	return r;
+}
+#endif
+
 static inline int valid_vcpu(int n)
 {
 	return likely(n >= 0 && n < KVM_MAX_VCPUS);
 }
 
+inline int kvm_is_mmio_pfn(pfn_t pfn)
+{
+	if (pfn_valid(pfn))
+		return PageReserved(pfn_to_page(pfn));
+
+	return true;
+}
+
 /*
  * Switches to specified vcpu, until a matching vcpu_put()
  */
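
The interrupt path added above is easiest to read as a round trip: kvm_assigned_dev_intr() masks the host line with disable_irq_nosync() and defers to a workqueue (injecting needs a sleepable context for kvm->lock), the work handler raises the guest IRQ, and kvm_assigned_dev_ack_irq() lowers it and re-enables the host line only once the guest has acked. A minimal sketch of that mask/defer/inject/unmask pattern, with a hypothetical my_forwarder standing in for kvm_assigned_dev_kernel:

#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_forwarder {			/* hypothetical stand-in */
	int host_irq;
	struct work_struct inject_work;	/* queued by the hard-irq handler */
};

static irqreturn_t my_host_intr(int irq, void *opaque)
{
	struct my_forwarder *fwd = opaque;

	disable_irq_nosync(irq);	  /* mask until the guest acks */
	schedule_work(&fwd->inject_work); /* inject from process context */
	return IRQ_HANDLED;
}

static void my_guest_acked(struct my_forwarder *fwd)
{
	enable_irq(fwd->host_irq);	/* unmask; next interrupt may fire */
}

Keeping the host line masked until the guest ack arrives is what makes level-triggered lines safe to forward; it is also why sharing the line with another host driver is deferred as a FIXME above.
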
@@ -570,6 +818,12 @@ int __kvm_set_memory_region(struct kvm *kvm,
 	}
 
 	kvm_free_physmem_slot(&old, &new);
+#ifdef CONFIG_DMAR
+	/* map the pages in iommu page table */
+	r = kvm_iommu_map_pages(kvm, base_gfn, npages);
+	if (r)
+		goto out;
+#endif
 	return 0;
 
 out_free:
@@ -708,9 +962,6 @@ unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_hva);
 
-/*
- * Requires current->mm->mmap_sem to be held
- */
 pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 {
 	struct page *page[1];
@@ -726,21 +977,24 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 		return page_to_pfn(bad_page);
 	}
 
-	npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
-				NULL);
+	npages = get_user_pages_fast(addr, 1, 1, page);
 
 	if (unlikely(npages != 1)) {
 		struct vm_area_struct *vma;
 
+		down_read(&current->mm->mmap_sem);
 		vma = find_vma(current->mm, addr);
+
 		if (vma == NULL || addr < vma->vm_start ||
 		    !(vma->vm_flags & VM_PFNMAP)) {
+			up_read(&current->mm->mmap_sem);
 			get_page(bad_page);
 			return page_to_pfn(bad_page);
 		}
 
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
-		BUG_ON(pfn_valid(pfn));
+		up_read(&current->mm->mmap_sem);
+		BUG_ON(!kvm_is_mmio_pfn(pfn));
 	} else
 		pfn = page_to_pfn(page[0]);
 
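
Two things change in gfn_to_pfn() here. The lookup now uses get_user_pages_fast(), which takes mmap_sem internally only when it must, so callers no longer need to hold the semaphore themselves (hence the "Requires current->mm->mmap_sem" comment deleted above); the VM_PFNMAP slow path takes and drops mmap_sem around find_vma() on its own. And the sanity check flips from BUG_ON(pfn_valid(pfn)) to BUG_ON(!kvm_is_mmio_pfn(pfn)), since a PFNMAP pfn can be valid yet still refer to reserved MMIO. The caller-visible contract, as a sketch (my_map_one() is hypothetical):

static pfn_t my_map_one(struct kvm *kvm, gfn_t gfn)
{
	pfn_t pfn;

	/* Before this patch the caller had to bracket the call itself:
	 *
	 *	down_read(&current->mm->mmap_sem);
	 *	pfn = gfn_to_pfn(kvm, gfn);
	 *	up_read(&current->mm->mmap_sem);
	 *
	 * After it, gfn_to_pfn() is self-contained: the fast path never
	 * touches mmap_sem and the VM_PFNMAP fallback takes it internally.
	 */
	pfn = gfn_to_pfn(kvm, gfn);
	return pfn;
}
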
@@ -754,10 +1008,10 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 	pfn_t pfn;
 
 	pfn = gfn_to_pfn(kvm, gfn);
-	if (pfn_valid(pfn))
+	if (!kvm_is_mmio_pfn(pfn))
 		return pfn_to_page(pfn);
 
-	WARN_ON(!pfn_valid(pfn));
+	WARN_ON(kvm_is_mmio_pfn(pfn));
 
 	get_page(bad_page);
 	return bad_page;
@@ -773,7 +1027,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (pfn_valid(pfn))
+	if (!kvm_is_mmio_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -799,7 +1053,7 @@ EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (pfn_valid(pfn)) {
+	if (!kvm_is_mmio_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -809,14 +1063,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (pfn_valid(pfn))
+	if (!kvm_is_mmio_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (pfn_valid(pfn))
+	if (!kvm_is_mmio_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
@@ -972,12 +1226,12 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
 	for (;;) {
 		prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
 
-		if (kvm_cpu_has_interrupt(vcpu))
-			break;
-		if (kvm_cpu_has_pending_timer(vcpu))
-			break;
-		if (kvm_arch_vcpu_runnable(vcpu))
-			break;
+		if (kvm_cpu_has_interrupt(vcpu) ||
+		    kvm_cpu_has_pending_timer(vcpu) ||
+		    kvm_arch_vcpu_runnable(vcpu)) {
+			set_bit(KVM_REQ_UNHALT, &vcpu->requests);
+			break;
+		}
 		if (signal_pending(current))
 			break;
 
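
Beyond folding the three wakeup checks into one condition, this hunk latches KVM_REQ_UNHALT in vcpu->requests, so code downstream of kvm_vcpu_block() can tell "woken because the vcpu became runnable" apart from "woken by a signal". A hedged consumer sketch (my_halt() is hypothetical, and the mp_state field is assumed from the x86 arch layout of the era):

static void my_halt(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_block(vcpu);
	/* Only a wakeup-to-run sets the bit; a signal leaves it clear. */
	if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;	/* assumed field */
}
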
@@ -1074,12 +1328,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 
 	r = kvm_arch_vcpu_setup(vcpu);
 	if (r)
-		goto vcpu_destroy;
+		return r;
 
 	mutex_lock(&kvm->lock);
 	if (kvm->vcpus[n]) {
 		r = -EEXIST;
-		mutex_unlock(&kvm->lock);
 		goto vcpu_destroy;
 	}
 	kvm->vcpus[n] = vcpu;
@@ -1095,8 +1348,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
 unlink:
 	mutex_lock(&kvm->lock);
 	kvm->vcpus[n] = NULL;
-	mutex_unlock(&kvm->lock);
 vcpu_destroy:
+	mutex_unlock(&kvm->lock);
 	kvm_arch_vcpu_destroy(vcpu);
 	return r;
 }
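
These two hunks straighten out the error paths of kvm_vm_ioctl_create_vcpu(): a failure in kvm_arch_vcpu_setup() now returns before the lock is ever taken, and every path that does take kvm->lock funnels through a single mutex_unlock() at the vcpu_destroy label instead of unlocking ad hoc before each goto. The resulting lock discipline, as a minimal sketch (my_install_vcpu() is hypothetical and omits the fd creation the real function does):

static int my_install_vcpu(struct kvm *kvm, struct kvm_vcpu *vcpu, int n)
{
	int r = 0;

	mutex_lock(&kvm->lock);
	if (kvm->vcpus[n]) {
		r = -EEXIST;
		goto out_unlock;	/* lock still held: one unlock site */
	}
	kvm->vcpus[n] = vcpu;
out_unlock:
	mutex_unlock(&kvm->lock);
	return r;
}
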
@@ -1118,6 +1371,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
 	struct kvm_vcpu *vcpu = filp->private_data;
 	void __user *argp = (void __user *)arg;
 	int r;
+	struct kvm_fpu *fpu = NULL;
+	struct kvm_sregs *kvm_sregs = NULL;
 
 	if (vcpu->kvm->mm != current->mm)
 		return -EIO;
@@ -1165,25 +1420,28 @@ out_free2:
 		break;
 	}
 	case KVM_GET_SREGS: {
-		struct kvm_sregs kvm_sregs;
-
-		memset(&kvm_sregs, 0, sizeof kvm_sregs);
-		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
+		kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+		r = -ENOMEM;
+		if (!kvm_sregs)
+			goto out;
+		r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
+		if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
 			goto out;
 		r = 0;
 		break;
 	}
 	case KVM_SET_SREGS: {
-		struct kvm_sregs kvm_sregs;
-
+		kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
+		r = -ENOMEM;
+		if (!kvm_sregs)
+			goto out;
 		r = -EFAULT;
-		if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
+		if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
 			goto out;
-		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
+		r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
 		if (r)
 			goto out;
 		r = 0;
@@ -1264,25 +1522,28 @@ out_free2:
 		break;
 	}
 	case KVM_GET_FPU: {
-		struct kvm_fpu fpu;
-
-		memset(&fpu, 0, sizeof fpu);
-		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
+		fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+		r = -ENOMEM;
+		if (!fpu)
+			goto out;
+		r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
 		if (r)
 			goto out;
 		r = -EFAULT;
-		if (copy_to_user(argp, &fpu, sizeof fpu))
+		if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
 			goto out;
 		r = 0;
 		break;
 	}
 	case KVM_SET_FPU: {
-		struct kvm_fpu fpu;
-
+		fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
+		r = -ENOMEM;
+		if (!fpu)
+			goto out;
 		r = -EFAULT;
-		if (copy_from_user(&fpu, argp, sizeof fpu))
+		if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
 			goto out;
-		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
+		r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
 		if (r)
 			goto out;
 		r = 0;
@@ -1292,6 +1553,8 @@ out_free2:
 		r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
 	}
 out:
+	kfree(fpu);
+	kfree(kvm_sregs);
 	return r;
 }
 
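
The KVM_GET/SET_SREGS and KVM_GET/SET_FPU cases stop declaring struct kvm_sregs and struct kvm_fpu on the stack: both are large and kernel stacks are small, so they are kmalloc'd per call and freed once at the common out: label (kfree(NULL) is a no-op, so cases that never allocated are fine). The GET paths use kzalloc where the old code used memset, so padding bytes cannot leak to userspace. The pattern, sketched with a hypothetical struct big_state and fill helper:

static long my_get_ioctl(void __user *argp)
{
	struct big_state *state;	/* hypothetical: too big for the stack */
	long r;

	state = kzalloc(sizeof(*state), GFP_KERNEL);	/* zeroed: no leaks */
	if (!state)
		return -ENOMEM;
	r = my_fill_state(state);	/* hypothetical */
	if (!r && copy_to_user(argp, state, sizeof(*state)))
		r = -EFAULT;
	kfree(state);			/* single free on every path */
	return r;
}
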
@@ -1360,6 +1623,30 @@ static long kvm_vm_ioctl(struct file *filp,
 		break;
 	}
 #endif
+#ifdef KVM_CAP_DEVICE_ASSIGNMENT
+	case KVM_ASSIGN_PCI_DEVICE: {
+		struct kvm_assigned_pci_dev assigned_dev;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
+			goto out;
+		r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
+		if (r)
+			goto out;
+		break;
+	}
+	case KVM_ASSIGN_IRQ: {
+		struct kvm_assigned_irq assigned_irq;
+
+		r = -EFAULT;
+		if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
+			goto out;
+		r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
+		if (r)
+			goto out;
+		break;
+	}
+#endif
 	default:
 		r = kvm_arch_vm_ioctl(filp, ioctl, arg);
 	}
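
From userspace, both new ioctls are issued on the VM file descriptor: KVM_ASSIGN_PCI_DEVICE first, to pin down the host device (optionally requesting IOMMU mapping via KVM_DEV_ASSIGN_ENABLE_IOMMU), then KVM_ASSIGN_IRQ to wire its interrupt into the guest. A hedged sketch; the field names mirror the kernel-side accesses in this patch, and assigned_dev_id is just a caller-chosen key that kvm_find_assigned_dev() matches on:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assign_device(int vm_fd, __u32 busnr, __u32 devfn, __u32 guest_irq)
{
	struct kvm_assigned_pci_dev dev;
	struct kvm_assigned_irq irq;

	memset(&dev, 0, sizeof(dev));
	dev.assigned_dev_id = (busnr << 8) | devfn;	/* any unique key */
	dev.busnr = busnr;
	dev.devfn = devfn;
	dev.flags = KVM_DEV_ASSIGN_ENABLE_IOMMU;	/* map guest RAM in IOMMU */
	if (ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev) < 0)
		return -1;

	memset(&irq, 0, sizeof(irq));
	irq.assigned_dev_id = dev.assigned_dev_id;
	irq.host_irq = 0;			/* 0: kernel falls back to dev->irq */
	irq.guest_irq = guest_irq;
	return ioctl(vm_fd, KVM_ASSIGN_IRQ, &irq);
}

Re-issuing KVM_ASSIGN_IRQ for an already-wired device only updates guest_irq and the ack notifier's gsi, per the irq_requested early-return above.
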
@@ -1369,17 +1656,22 @@ out:
 
 static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+	struct page *page[1];
+	unsigned long addr;
+	int npages;
+	gfn_t gfn = vmf->pgoff;
 	struct kvm *kvm = vma->vm_file->private_data;
-	struct page *page;
 
-	if (!kvm_is_visible_gfn(kvm, vmf->pgoff))
+	addr = gfn_to_hva(kvm, gfn);
+	if (kvm_is_error_hva(addr))
 		return VM_FAULT_SIGBUS;
-	page = gfn_to_page(kvm, vmf->pgoff);
-	if (is_error_page(page)) {
-		kvm_release_page_clean(page);
+
+	npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
+				NULL);
+	if (unlikely(npages != 1))
 		return VM_FAULT_SIGBUS;
-	}
-	vmf->page = page;
+
+	vmf->page = page[0];
 	return 0;
 }
 