aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/vhost/net.c
diff options
context:
space:
mode:
authorJason Wang <jasowang@redhat.com>2011-01-17 03:11:08 -0500
committerMichael S. Tsirkin <mst@redhat.com>2011-03-13 11:00:10 -0400
commit94249369e9930276e30087da205349a55478cbb5 (patch)
treeb5040c7bb90bbd772f0692c7f6a367d5ce758f37 /drivers/vhost/net.c
parentcfbdab951369f15de890597530076bf0119361be (diff)
vhost-net: Unify the code of mergeable and big buffer handling
Code duplication was found between the handling of mergeable and big buffers, so this patch tries to unify them. This could be easily done by adding a quota to get_rx_bufs(), which is used to limit the number of buffers it returns (for mergeable buffers, the quota is simply UIO_MAXIOV; for big buffers, the quota is just 1), and then the previous handle_rx_mergeable() could be reused also for big buffers. Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Diffstat (limited to 'drivers/vhost/net.c')
-rw-r--r--drivers/vhost/net.c128
1 files changed, 7 insertions, 121 deletions
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9f57cd45fe8f..0329c411bbf1 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -229,6 +229,7 @@ static int peek_head_len(struct sock *sk)
229 * @iovcount - returned count of io vectors we fill 229 * @iovcount - returned count of io vectors we fill
230 * @log - vhost log 230 * @log - vhost log
231 * @log_num - log offset 231 * @log_num - log offset
232 * @quota - headcount quota, 1 for big buffer
232 * returns number of buffer heads allocated, negative on error 233 * returns number of buffer heads allocated, negative on error
233 */ 234 */
234static int get_rx_bufs(struct vhost_virtqueue *vq, 235static int get_rx_bufs(struct vhost_virtqueue *vq,
@@ -236,7 +237,8 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
236 int datalen, 237 int datalen,
237 unsigned *iovcount, 238 unsigned *iovcount,
238 struct vhost_log *log, 239 struct vhost_log *log,
239 unsigned *log_num) 240 unsigned *log_num,
241 unsigned int quota)
240{ 242{
241 unsigned int out, in; 243 unsigned int out, in;
242 int seg = 0; 244 int seg = 0;
@@ -244,7 +246,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
244 unsigned d; 246 unsigned d;
245 int r, nlogs = 0; 247 int r, nlogs = 0;
246 248
247 while (datalen > 0) { 249 while (datalen > 0 && headcount < quota) {
248 if (unlikely(seg >= UIO_MAXIOV)) { 250 if (unlikely(seg >= UIO_MAXIOV)) {
249 r = -ENOBUFS; 251 r = -ENOBUFS;
250 goto err; 252 goto err;
@@ -284,116 +286,7 @@ err:
284 286
285/* Expects to be always run from workqueue - which acts as 287/* Expects to be always run from workqueue - which acts as
286 * read-size critical section for our kind of RCU. */ 288 * read-size critical section for our kind of RCU. */
287static void handle_rx_big(struct vhost_net *net) 289static void handle_rx(struct vhost_net *net)
288{
289 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
290 unsigned out, in, log, s;
291 int head;
292 struct vhost_log *vq_log;
293 struct msghdr msg = {
294 .msg_name = NULL,
295 .msg_namelen = 0,
296 .msg_control = NULL, /* FIXME: get and handle RX aux data. */
297 .msg_controllen = 0,
298 .msg_iov = vq->iov,
299 .msg_flags = MSG_DONTWAIT,
300 };
301 struct virtio_net_hdr hdr = {
302 .flags = 0,
303 .gso_type = VIRTIO_NET_HDR_GSO_NONE
304 };
305 size_t len, total_len = 0;
306 int err;
307 size_t hdr_size;
308 /* TODO: check that we are running from vhost_worker? */
309 struct socket *sock = rcu_dereference_check(vq->private_data, 1);
310
311 if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
312 return;
313
314 mutex_lock(&vq->mutex);
315 vhost_disable_notify(vq);
316 hdr_size = vq->vhost_hlen;
317
318 vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
319 vq->log : NULL;
320
321 for (;;) {
322 head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
323 ARRAY_SIZE(vq->iov),
324 &out, &in,
325 vq_log, &log);
326 /* On error, stop handling until the next kick. */
327 if (unlikely(head < 0))
328 break;
329 /* OK, now we need to know about added descriptors. */
330 if (head == vq->num) {
331 if (unlikely(vhost_enable_notify(vq))) {
332 /* They have slipped one in as we were
333 * doing that: check again. */
334 vhost_disable_notify(vq);
335 continue;
336 }
337 /* Nothing new? Wait for eventfd to tell us
338 * they refilled. */
339 break;
340 }
341 /* We don't need to be notified again. */
342 if (out) {
343 vq_err(vq, "Unexpected descriptor format for RX: "
344 "out %d, int %d\n",
345 out, in);
346 break;
347 }
348 /* Skip header. TODO: support TSO/mergeable rx buffers. */
349 s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
350 msg.msg_iovlen = in;
351 len = iov_length(vq->iov, in);
352 /* Sanity check */
353 if (!len) {
354 vq_err(vq, "Unexpected header len for RX: "
355 "%zd expected %zd\n",
356 iov_length(vq->hdr, s), hdr_size);
357 break;
358 }
359 err = sock->ops->recvmsg(NULL, sock, &msg,
360 len, MSG_DONTWAIT | MSG_TRUNC);
361 /* TODO: Check specific error and bomb out unless EAGAIN? */
362 if (err < 0) {
363 vhost_discard_vq_desc(vq, 1);
364 break;
365 }
366 /* TODO: Should check and handle checksum. */
367 if (err > len) {
368 pr_debug("Discarded truncated rx packet: "
369 " len %d > %zd\n", err, len);
370 vhost_discard_vq_desc(vq, 1);
371 continue;
372 }
373 len = err;
374 err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
375 if (err) {
376 vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
377 vq->iov->iov_base, err);
378 break;
379 }
380 len += hdr_size;
381 vhost_add_used_and_signal(&net->dev, vq, head, len);
382 if (unlikely(vq_log))
383 vhost_log_write(vq, vq_log, log, len);
384 total_len += len;
385 if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
386 vhost_poll_queue(&vq->poll);
387 break;
388 }
389 }
390
391 mutex_unlock(&vq->mutex);
392}
393
394/* Expects to be always run from workqueue - which acts as
395 * read-size critical section for our kind of RCU. */
396static void handle_rx_mergeable(struct vhost_net *net)
397{ 290{
398 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX]; 291 struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
399 unsigned uninitialized_var(in), log; 292 unsigned uninitialized_var(in), log;
@@ -433,7 +326,8 @@ static void handle_rx_mergeable(struct vhost_net *net)
433 sock_len += sock_hlen; 326 sock_len += sock_hlen;
434 vhost_len = sock_len + vhost_hlen; 327 vhost_len = sock_len + vhost_hlen;
435 headcount = get_rx_bufs(vq, vq->heads, vhost_len, 328 headcount = get_rx_bufs(vq, vq->heads, vhost_len,
436 &in, vq_log, &log); 329 &in, vq_log, &log,
330 likely(mergeable) ? UIO_MAXIOV : 1);
437 /* On error, stop handling until the next kick. */ 331 /* On error, stop handling until the next kick. */
438 if (unlikely(headcount < 0)) 332 if (unlikely(headcount < 0))
439 break; 333 break;
@@ -499,14 +393,6 @@ static void handle_rx_mergeable(struct vhost_net *net)
499 mutex_unlock(&vq->mutex); 393 mutex_unlock(&vq->mutex);
500} 394}
501 395
502static void handle_rx(struct vhost_net *net)
503{
504 if (vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF))
505 handle_rx_mergeable(net);
506 else
507 handle_rx_big(net);
508}
509
510static void handle_tx_kick(struct vhost_work *work) 396static void handle_tx_kick(struct vhost_work *work)
511{ 397{
512 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, 398 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,