author     Michael S. Tsirkin <mst@redhat.com>    2013-11-28 06:30:59 -0500
committer  David S. Miller <davem@davemloft.net>  2013-12-01 20:27:16 -0500
commit     f121159d72091f25afb22007c833e60a6845e912 (patch)
tree       d9570571764a2a1f74e0af9619464228e2af77c6 /drivers/net
parent     8fc3b9e9a229778e5af3aa453c44f1a3857ba769 (diff)
virtio_net: make all RX paths handle errors consistently

receive_mergeable() now handles errors internally. Do the same for the big and
small packet paths, otherwise the logic is too hard to follow.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
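For readers skimming the patch, the sketch below models the convention the commit adopts: each per-path receive helper cleans up its own buffer on failure and signals the error by returning NULL, leaving the caller with a single NULL check. This is a minimal userspace illustration with invented names (struct buf, recv_small, recv_big), not the virtio_net code itself.

/*
 * Minimal sketch of the error-handling convention introduced by this
 * patch, using invented stand-in types and helpers for illustration:
 * every per-path helper owns its own cleanup on failure and reports
 * the failure by returning NULL.
 */
#include <stdio.h>
#include <stdlib.h>

struct buf { size_t len; char *data; };

/* Stand-in for receive_small(): this path cannot fail. */
static struct buf *recv_small(struct buf *b)
{
        return b;
}

/* Stand-in for receive_big(): frees its own buffer on error. */
static struct buf *recv_big(struct buf *b)
{
        if (b->len == 0) {          /* simulated failure              */
                free(b->data);      /* helper owns the cleanup ...    */
                free(b);
                return NULL;        /* ... and reports it via NULL    */
        }
        return b;
}

/* Caller mirrors the new receive_buf() shape: dispatch, then one check. */
static struct buf *receive(struct buf *b, int big)
{
        struct buf *out = big ? recv_big(b) : recv_small(b);

        if (!out)
                return NULL;        /* error already handled by the helper */
        return out;
}

int main(void)
{
        struct buf *b = calloc(1, sizeof(*b));

        b->len = 0;                 /* force the error path */
        printf("receive() -> %p\n", (void *)receive(b, 1));
        return 0;
}

The design choice is the one the commit message argues for: pushing cleanup into each helper keeps every path's error handling next to the allocation it undoes, so the dispatcher (receive_buf() in the diff below) stays a simple three-way branch plus one error check.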
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/virtio_net.c   54
1 file changed, 37 insertions(+), 17 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 71a2eac7b039..916241d16c67 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -299,6 +299,35 @@ static struct sk_buff *page_to_skb(struct receive_queue *rq,
         return skb;
 }
 
+static struct sk_buff *receive_small(void *buf, unsigned int len)
+{
+        struct sk_buff * skb = buf;
+
+        len -= sizeof(struct virtio_net_hdr);
+        skb_trim(skb, len);
+
+        return skb;
+}
+
+static struct sk_buff *receive_big(struct net_device *dev,
+                                   struct receive_queue *rq,
+                                   void *buf,
+                                   unsigned int len)
+{
+        struct page *page = buf;
+        struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+
+        if (unlikely(!skb))
+                goto err;
+
+        return skb;
+
+err:
+        dev->stats.rx_dropped++;
+        give_pages(rq, page);
+        return NULL;
+}
+
 static struct sk_buff *receive_mergeable(struct net_device *dev,
                                          struct receive_queue *rq,
                                          void *buf,
@@ -392,7 +421,6 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
         struct net_device *dev = vi->dev;
         struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
         struct sk_buff *skb;
-        struct page *page;
         struct skb_vnet_hdr *hdr;
 
         if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
@@ -407,23 +435,15 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                 return;
         }
 
-        if (!vi->mergeable_rx_bufs && !vi->big_packets) {
-                skb = buf;
-                len -= sizeof(struct virtio_net_hdr);
-                skb_trim(skb, len);
-        } else if (vi->mergeable_rx_bufs) {
-                skb = receive_mergeable(dev, rq, buf, len);
-                if (unlikely(!skb))
-                        return;
-        } else {
-                page = buf;
-                skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
-                if (unlikely(!skb)) {
-                        dev->stats.rx_dropped++;
-                        give_pages(rq, page);
-                        return;
-                }
-        }
+        if (vi->mergeable_rx_bufs)
+                skb = receive_mergeable(dev, rq, buf, len);
+        else if (vi->big_packets)
+                skb = receive_big(dev, rq, buf, len);
+        else
+                skb = receive_small(buf, len);
+
+        if (unlikely(!skb))
+                return;
 
         hdr = skb_vnet_hdr(skb);
 