author    Michael S. Tsirkin <mst@redhat.com>  2013-12-26 08:32:51 -0500
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2014-01-15 18:28:49 -0500
commit    59d2a52eb56dcdaec3c81c456bf408cbab13bde6 (patch)
tree      633dd8dc51e99badbd2c753ddf77a8f4fbbd7838
parent    e32904ec6251df8e552074c9eb068606955d894c (diff)
virtio-net: make all RX paths handle errors consistently
receive_mergeable() now handles errors internally. Do the same for the big
and small packet paths, otherwise the logic is too hard to follow.

Cc: Jason Wang <jasowang@redhat.com>
Cc: David S. Miller <davem@davemloft.net>
Acked-by: Michael Dalton <mwdalton@google.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
(cherry picked from commit f121159d72091f25afb22007c833e60a6845e912)
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--  drivers/net/virtio_net.c | 56
1 file changed, 36 insertions(+), 20 deletions(-)
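The point of the patch is easiest to see outside the driver: after it, every
receive path frees its own buffer and returns NULL on failure, so the caller
needs a single NULL check instead of per-path cleanup. Below is a minimal,
self-contained userspace sketch of that shape; the types and names (pkt,
rx_small, rx_big, rx) are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for an sk_buff; illustrative only. */
struct pkt {
        unsigned char *data;
        size_t len;
};

/* Like receive_small(): cannot fail, just trims the header. */
static struct pkt *rx_small(struct pkt *p, size_t hdr_len)
{
        p->len -= hdr_len;
        return p;
}

/* Like receive_big() after the patch: on failure it frees its own
 * buffer (give_pages() in the driver) and returns NULL, so the
 * caller never needs path-specific cleanup. */
static struct pkt *rx_big(struct pkt *p)
{
        if (!p->data) {         /* stands in for page_to_skb() failing */
                free(p);
                return NULL;
        }
        return p;
}

/* Shape of receive_buf() after the patch: dispatch, then one check. */
static void rx(struct pkt *p, int big)
{
        struct pkt *skb = big ? rx_big(p) : rx_small(p, 10);

        if (!skb) {
                puts("dropped; the RX path already cleaned up");
                return;
        }
        puts("delivered");
        free(skb);
}

int main(void)
{
        struct pkt *ok = calloc(1, sizeof(*ok));
        struct pkt *bad = calloc(1, sizeof(*bad));

        if (!ok || !bad)
                return 1;
        ok->data = (unsigned char *)"payload";
        ok->len = 17;           /* pretend 10-byte header + 7 bytes data */

        rx(ok, 0);              /* small path: always succeeds */
        rx(bad, 1);             /* big path: data == NULL forces the error */
        return 0;
}

The hunks in receive_buf() below are exactly this transformation: the nested
per-path error handling collapses into a single "if (unlikely(!skb)) return;".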
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ad9b385e1f3d..ec0e9f236ff0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -294,6 +294,34 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
 	return skb;
 }
 
+static struct sk_buff *receive_small(void *buf, unsigned int len)
+{
+	struct sk_buff *skb = buf;
+
+	len -= sizeof(struct virtio_net_hdr);
+	skb_trim(skb, len);
+
+	return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+				   struct receive_queue *rq,
+				   void *buf)
+{
+	struct page *page = buf;
+	struct sk_buff *skb = page_to_skb(rq, page, 0);
+
+	if (unlikely(!skb))
+		goto err;
+
+	return skb;
+
+err:
+	dev->stats.rx_dropped++;
+	give_pages(rq, page);
+	return NULL;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
 					 struct receive_queue *rq,
 					 void *buf,
@@ -357,7 +385,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
-	struct page *page;
 	struct skb_vnet_hdr *hdr;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -369,26 +396,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 		dev_kfree_skb(buf);
 		return;
 	}
+	if (vi->mergeable_rx_bufs)
+		skb = receive_mergeable(dev, rq, buf, len);
+	else if (vi->big_packets)
+		skb = receive_big(dev, rq, buf);
+	else
+		skb = receive_small(buf, len);
 
-	if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-		skb = buf;
-		len -= sizeof(struct virtio_net_hdr);
-		skb_trim(skb, len);
-	} else {
-		page = buf;
-		if (vi->mergeable_rx_bufs) {
-			skb = receive_mergeable(dev, rq, page, len);
-			if (unlikely(!skb))
-				return;
-		} else {
-			skb = page_to_skb(rq, page, len);
-			if (unlikely(!skb)) {
-				dev->stats.rx_dropped++;
-				give_pages(rq, page);
-				return;
-			}
-		}
-	}
+	if (unlikely(!skb))
+		return;
 
 	hdr = skb_vnet_hdr(skb);
 