path: root/drivers/vhost
author    Igor Mammedov <imammedo@redhat.com>    2015-07-01 05:07:09 -0400
committer Michael S. Tsirkin <mst@redhat.com>    2015-07-13 16:17:18 -0400
commit    4de7255f7d2be5e51664c6ac6011ffd6e5463571 (patch)
tree      625b30fbabffe479856e066d83857011aebc64e0 /drivers/vhost
parent    ea52bf8eda9832ad30e9f059c5ead8d44f882a53 (diff)
vhost: extend memory regions allocation to vmalloc
With a large number of memory regions we could end up with high-order allocations, and kmalloc could fail if the host is under memory pressure. Considering that the memory regions array is used on a hot path, try harder to allocate using kmalloc, and if that fails fall back to vmalloc. This is still better than failing vhost_set_memory() and crashing the guest when new memory is hotplugged into it.

I'll still look at a QEMU-side solution to reduce the number of memory regions it feeds to vhost to make things even better, but it doesn't hurt for the kernel to behave smarter and not break older QEMUs which may use a large number of memory regions.

Signed-off-by: Igor Mammedov <imammedo@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
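For readers skimming the patch below: the new helper boils down to "try a physically contiguous kzalloc() first; if that fails, fall back to vzalloc()", with kvfree() used on the free side since it accepts either kind of pointer. The following is a minimal kernel-style sketch of that pattern for illustration only, not part of the patch: the helper name demo_kvzalloc is made up, it drops the __GFP_REPEAT flag the patch passes to make the kmalloc attempt try harder, and it returns NULL on failure for simplicity, whereas the patch's vhost_kvzalloc() returns ERR_PTR(-ENOMEM) when vzalloc() also fails.

/* Illustrative sketch only; needs <linux/slab.h> and <linux/vmalloc.h>. */
static void *demo_kvzalloc(unsigned long size)
{
	/*
	 * Try the slab allocator first: fast and physically contiguous,
	 * but high-order requests can fail under memory pressure.
	 * __GFP_NOWARN avoids log spam when that happens.
	 */
	void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!p)
		p = vzalloc(size);	/* virtually contiguous fallback */

	return p;			/* NULL if both attempts failed */
}

Callers free the result with kvfree(), which dispatches to kfree() or vfree() as appropriate. Later kernels added generic kvzalloc()/kvmalloc() helpers that implement this same fallback, so new code should use those rather than open-coding it.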
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/vhost.c | 21
1 file changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 71bb46813031..a4ac369f6adb 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,6 +22,7 @@
 #include <linux/file.h>
 #include <linux/highmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/kthread.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
@@ -544,7 +545,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
 	fput(dev->log_file);
 	dev->log_file = NULL;
 	/* No one will access memory at this point */
-	kfree(dev->memory);
+	kvfree(dev->memory);
 	dev->memory = NULL;
 	WARN_ON(!list_empty(&dev->work_list));
 	if (dev->worker) {
@@ -674,6 +675,18 @@ static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
 	return 0;
 }
 
+static void *vhost_kvzalloc(unsigned long size)
+{
+	void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+
+	if (!n) {
+		n = vzalloc(size);
+		if (!n)
+			return ERR_PTR(-ENOMEM);
+	}
+	return n;
+}
+
 static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 {
 	struct vhost_memory mem, *newmem, *oldmem;
@@ -686,7 +699,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 		return -EOPNOTSUPP;
 	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
 		return -E2BIG;
-	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+	newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
 	if (!newmem)
 		return -ENOMEM;
 
@@ -700,7 +713,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 	     vhost_memory_reg_sort_cmp, NULL);
 
 	if (!memory_access_ok(d, newmem, 0)) {
-		kfree(newmem);
+		kvfree(newmem);
 		return -EFAULT;
 	}
 	oldmem = d->memory;
@@ -712,7 +725,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
 		d->vqs[i]->memory = newmem;
 		mutex_unlock(&d->vqs[i]->mutex);
 	}
-	kfree(oldmem);
+	kvfree(oldmem);
 	return 0;
 }
 