author     Jason Wang <jasowang@redhat.com>        2010-09-14 11:53:05 -0400
committer  Michael S. Tsirkin <mst@redhat.com>     2010-10-05 07:33:43 -0400
commit     e0e9b406470b8dd75a115cf82c4791f41615c4c3 (patch)
tree       d31f3ee226e91854bc97d948a30b80c18b21fa47 /drivers/vhost
parent     a00eac0c459abecb539fb2a2abd3122dd7ca5d4a (diff)
vhost: max s/g to match qemu
Qemu supports up to UIO_MAXIOV s/g entries, so we have to match that, because
guest drivers may rely on it.

Allocate the indirect and log arrays dynamically to avoid using too much
contiguous memory, and make the length of the hdr array match the header
length, since each iovec entry covers at least one byte.

Tested by copying large files with and without migration, in both Linux and
Windows guests.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
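[Editor's note, not part of the commit message] For scale: UIO_MAXIOV is 1024,
so the three per-virtqueue arrays this patch moves behind pointers would
otherwise embed tens of kilobytes directly in struct vhost_virtqueue. A
minimal userspace sketch, with the two small kernel struct layouts copied in
by hand and sizes assuming a common 64-bit build:

	/* Userspace sketch only: struct vring_used_elem and struct vhost_log
	 * are duplicated here so the program is self-contained. */
	#include <stdio.h>
	#include <stdint.h>
	#include <sys/uio.h>		/* struct iovec */

	#define UIO_MAXIOV 1024		/* matches the kernel's definition */

	struct vring_used_elem { uint32_t id; uint32_t len; };
	struct vhost_log { uint64_t addr; uint64_t len; };

	int main(void)
	{
		printf("indirect: %zu bytes\n", sizeof(struct iovec) * UIO_MAXIOV);
		printf("log:      %zu bytes\n", sizeof(struct vhost_log) * UIO_MAXIOV);
		printf("heads:    %zu bytes\n", sizeof(struct vring_used_elem) * UIO_MAXIOV);
		return 0;
	}

On a 64-bit build this prints 16384, 16384 and 8192 bytes, roughly 40 KB per
virtqueue; allocating them with three separate kmalloc() calls also avoids
asking the allocator for one large contiguous region.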
Diffstat (limited to 'drivers/vhost')
-rw-r--r--  drivers/vhost/net.c   |  2
-rw-r--r--  drivers/vhost/vhost.c | 49
-rw-r--r--  drivers/vhost/vhost.h | 18
3 files changed, 57 insertions, 12 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 7c8008225ee3..72ab71fdf053 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -243,7 +243,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
 	int r, nlogs = 0;
 
 	while (datalen > 0) {
-		if (unlikely(seg >= VHOST_NET_MAX_SG)) {
+		if (unlikely(seg >= UIO_MAXIOV)) {
 			r = -ENOBUFS;
 			goto err;
 		}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dd3d6f7406f8..344019774ddd 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -212,6 +212,45 @@ static int vhost_worker(void *data)
 	}
 }
 
+/* Helper to allocate iovec buffers for all vqs. */
+static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
+{
+	int i;
+	for (i = 0; i < dev->nvqs; ++i) {
+		dev->vqs[i].indirect = kmalloc(sizeof *dev->vqs[i].indirect *
+					       UIO_MAXIOV, GFP_KERNEL);
+		dev->vqs[i].log = kmalloc(sizeof *dev->vqs[i].log * UIO_MAXIOV,
+					  GFP_KERNEL);
+		dev->vqs[i].heads = kmalloc(sizeof *dev->vqs[i].heads *
+					    UIO_MAXIOV, GFP_KERNEL);
+
+		if (!dev->vqs[i].indirect || !dev->vqs[i].log ||
+		    !dev->vqs[i].heads)
+			goto err_nomem;
+	}
+	return 0;
+err_nomem:
+	for (; i >= 0; --i) {
+		kfree(dev->vqs[i].indirect);
+		kfree(dev->vqs[i].log);
+		kfree(dev->vqs[i].heads);
+	}
+	return -ENOMEM;
+}
+
+static void vhost_dev_free_iovecs(struct vhost_dev *dev)
+{
+	int i;
+	for (i = 0; i < dev->nvqs; ++i) {
+		kfree(dev->vqs[i].indirect);
+		dev->vqs[i].indirect = NULL;
+		kfree(dev->vqs[i].log);
+		dev->vqs[i].log = NULL;
+		kfree(dev->vqs[i].heads);
+		dev->vqs[i].heads = NULL;
+	}
+}
+
 long vhost_dev_init(struct vhost_dev *dev,
 		    struct vhost_virtqueue *vqs, int nvqs)
 {
@@ -229,6 +268,9 @@ long vhost_dev_init(struct vhost_dev *dev,
 	dev->worker = NULL;
 
 	for (i = 0; i < dev->nvqs; ++i) {
+		dev->vqs[i].log = NULL;
+		dev->vqs[i].indirect = NULL;
+		dev->vqs[i].heads = NULL;
 		dev->vqs[i].dev = dev;
 		mutex_init(&dev->vqs[i].mutex);
 		vhost_vq_reset(dev, dev->vqs + i);
@@ -295,6 +337,10 @@ static long vhost_dev_set_owner(struct vhost_dev *dev)
 	if (err)
 		goto err_cgroup;
 
+	err = vhost_dev_alloc_iovecs(dev);
+	if (err)
+		goto err_cgroup;
+
 	return 0;
 err_cgroup:
 	kthread_stop(worker);
@@ -345,6 +391,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev)
 		fput(dev->vqs[i].call);
 		vhost_vq_reset(dev, dev->vqs + i);
 	}
+	vhost_dev_free_iovecs(dev);
 	if (dev->log_ctx)
 		eventfd_ctx_put(dev->log_ctx);
 	dev->log_ctx = NULL;
@@ -947,7 +994,7 @@ static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	}
 
 	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
-			     ARRAY_SIZE(vq->indirect));
+			     UIO_MAXIOV);
 	if (unlikely(ret < 0)) {
 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 		return ret;
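[Editor's note] On the err_nomem path in vhost_dev_alloc_iovecs() above:
kfree(NULL) is a no-op, so the unwind loop can safely start at the very index
whose allocations just failed part-way. A simplified userspace model of the
same pattern (hypothetical names, free() standing in for kfree()):

	#include <stdio.h>
	#include <stdlib.h>

	struct vq { void *indirect, *log, *heads; };

	/* Same shape as vhost_dev_alloc_iovecs(): allocate three buffers per
	 * queue; on failure walk back down, freeing whatever was allocated.
	 * free(NULL), like kfree(NULL), is a no-op, so the unwind loop may
	 * include the iteration that failed. */
	static long alloc_iovecs(struct vq *vqs, int nvqs, size_t entries)
	{
		int i;
		for (i = 0; i < nvqs; ++i) {
			vqs[i].indirect = malloc(16 * entries);
			vqs[i].log      = malloc(16 * entries);
			vqs[i].heads    = malloc(8 * entries);
			if (!vqs[i].indirect || !vqs[i].log || !vqs[i].heads)
				goto err_nomem;
		}
		return 0;
	err_nomem:
		for (; i >= 0; --i) {	/* index i included: free(NULL) is safe */
			free(vqs[i].indirect);
			free(vqs[i].log);
			free(vqs[i].heads);
		}
		return -1;
	}

	int main(void)
	{
		struct vq vqs[2];
		/* demo only; the buffers leak on the success path */
		printf("alloc_iovecs: %ld\n", alloc_iovecs(vqs, 2, 1024));
		return 0;
	}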
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index afd77295971c..edc892989992 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -15,11 +15,6 @@
 
 struct vhost_device;
 
-enum {
-	/* Enough place for all fragments, head, and virtio net header. */
-	VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
-};
-
 struct vhost_work;
 typedef void (*vhost_work_fn_t)(struct vhost_work *work);
 
@@ -93,12 +88,15 @@ struct vhost_virtqueue {
 	bool log_used;
 	u64 log_addr;
 
-	struct iovec indirect[VHOST_NET_MAX_SG];
-	struct iovec iov[VHOST_NET_MAX_SG];
-	struct iovec hdr[VHOST_NET_MAX_SG];
+	struct iovec iov[UIO_MAXIOV];
+	/* hdr is used to store the virtio header.
+	 * Since each iovec has >= 1 byte length, we never need more than
+	 * header length entries to store the header. */
+	struct iovec hdr[sizeof(struct virtio_net_hdr_mrg_rxbuf)];
+	struct iovec *indirect;
 	size_t vhost_hlen;
 	size_t sock_hlen;
-	struct vring_used_elem heads[VHOST_NET_MAX_SG];
+	struct vring_used_elem *heads;
 	/* We use a kind of RCU to access private pointer.
 	 * All readers access it from worker, which makes it possible to
 	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
@@ -109,7 +107,7 @@ struct vhost_virtqueue {
 	void *private_data;
 	/* Log write descriptors */
 	void __user *log_base;
-	struct vhost_log log[VHOST_NET_MAX_SG];
+	struct vhost_log *log;
 };
 
 struct vhost_dev {
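[Editor's note] A worked example of the hdr[] sizing rule in the hunk above
(my illustration, not from the patch): struct virtio_net_hdr_mrg_rxbuf is
12 bytes on the usual ABIs, and since every iovec entry describes at least
one byte, even a maximally fragmented header spans at most 12 entries. A
userspace sketch with the virtio-net header layouts copied in by hand:

	#include <stdio.h>
	#include <stdint.h>

	struct virtio_net_hdr {			/* 10 bytes */
		uint8_t  flags;
		uint8_t  gso_type;
		uint16_t hdr_len;
		uint16_t gso_size;
		uint16_t csum_start;
		uint16_t csum_offset;
	};

	struct virtio_net_hdr_mrg_rxbuf {	/* 12 bytes */
		struct virtio_net_hdr hdr;
		uint16_t num_buffers;
	};

	int main(void)
	{
		/* worst case is one byte per iovec, so sizeof(header)
		 * entries always suffice, however the guest scatters it */
		printf("hdr iovec entries needed at most: %zu\n",
		       sizeof(struct virtio_net_hdr_mrg_rxbuf));
		return 0;
	}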