about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJason Wang <jasowang@redhat.com>2016-12-23 09:37:31 -0500
committerDavid S. Miller <davem@davemloft.net>2016-12-23 13:48:55 -0500
commitc47a43d3004ad6ff2a94a670cb3274cd6338d41e (patch)
tree5c0eb7b398598e3b5d1684c9f3648e00c9924155
parent92502fe86c7c9b3f8543f29641a3c71805e82757 (diff)
virtio-net: remove big packet XDP code
Now that we in fact don't allow XDP for big packets, remove its code. Cc: John Fastabend <john.r.fastabend@intel.com> Signed-off-by: Jason Wang <jasowang@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--drivers/net/virtio_net.c44
1 file changed, 3 insertions(+), 41 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c1f66d8bfb7b..e53365a86ca3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -344,11 +344,7 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
344 /* Free up any pending old buffers before queueing new ones. */ 344 /* Free up any pending old buffers before queueing new ones. */
345 while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) { 345 while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
346 struct page *sent_page = virt_to_head_page(xdp_sent); 346 struct page *sent_page = virt_to_head_page(xdp_sent);
347 347 put_page(sent_page);
348 if (vi->mergeable_rx_bufs)
349 put_page(sent_page);
350 else
351 give_pages(rq, sent_page);
352 } 348 }
353 349
354 /* Zero header and leave csum up to XDP layers */ 350 /* Zero header and leave csum up to XDP layers */
@@ -360,15 +356,8 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
360 err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, 356 err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
361 xdp->data, GFP_ATOMIC); 357 xdp->data, GFP_ATOMIC);
362 if (unlikely(err)) { 358 if (unlikely(err)) {
363 if (vi->mergeable_rx_bufs) 359 put_page(page);
364 put_page(page);
365 else
366 give_pages(rq, page);
367 return; // On error abort to avoid unnecessary kick 360 return; // On error abort to avoid unnecessary kick
368 } else if (!vi->mergeable_rx_bufs) {
369 /* If not mergeable bufs must be big packets so cleanup pages */
370 give_pages(rq, (struct page *)page->private);
371 page->private = 0;
372 } 361 }
373 362
374 virtqueue_kick(sq->vq); 363 virtqueue_kick(sq->vq);
@@ -430,44 +419,17 @@ static struct sk_buff *receive_big(struct net_device *dev,
430 void *buf, 419 void *buf,
431 unsigned int len) 420 unsigned int len)
432{ 421{
433 struct bpf_prog *xdp_prog;
434 struct page *page = buf; 422 struct page *page = buf;
435 struct sk_buff *skb; 423 struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
436
437 rcu_read_lock();
438 xdp_prog = rcu_dereference(rq->xdp_prog);
439 if (xdp_prog) {
440 struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
441 u32 act;
442
443 if (unlikely(hdr->hdr.gso_type))
444 goto err_xdp;
445 act = do_xdp_prog(vi, rq, xdp_prog, page, 0, len);
446 switch (act) {
447 case XDP_PASS:
448 break;
449 case XDP_TX:
450 rcu_read_unlock();
451 goto xdp_xmit;
452 case XDP_DROP:
453 default:
454 goto err_xdp;
455 }
456 }
457 rcu_read_unlock();
458 424
459 skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
460 if (unlikely(!skb)) 425 if (unlikely(!skb))
461 goto err; 426 goto err;
462 427
463 return skb; 428 return skb;
464 429
465err_xdp:
466 rcu_read_unlock();
467err: 430err:
468 dev->stats.rx_dropped++; 431 dev->stats.rx_dropped++;
469 give_pages(rq, page); 432 give_pages(rq, page);
470xdp_xmit:
471 return NULL; 433 return NULL;
472} 434}
473 435