author		Jason Wang <jasowang@redhat.com>	2016-12-23 09:37:32 -0500
committer	David S. Miller <davem@davemloft.net>	2016-12-23 13:48:56 -0500
commit		bb91accf27335c6dc460e202991ca140fa21e1b5 (patch)
tree		3fb73f7fd7545d53be8ba17d64285fffc4768eed
parent		c47a43d3004ad6ff2a94a670cb3274cd6338d41e (diff)
virtio-net: XDP support for small buffers
Commit f600b6905015 ("virtio_net: Add XDP support") leaves the small
receive buffer case untouched. This will confuse users who want to set
up XDP but use small buffers. Rather than forbid XDP in small buffer
mode, let's make it work. XDP can then only operate on skb->data, since
virtio-net creates skbs during refill; this is suboptimal and could be
optimized in the future.

Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Acked-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
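For context, the small-buffer XDP_TX transmit step that the patch adds to
virtnet_xdp_xmit() boils down to the following sketch. This is not a drop-in
excerpt: it assumes the driver-internal skb_vnet_hdr() helper and the vi/sq
locals available inside virtnet_xdp_xmit(), and it omits the pending-buffer
reclaim loop and the error handling shown in the hunks below.

	/* Small-buffer (non-mergeable) XDP_TX: the packet already lives in
	 * an skb, so send a two-entry scatterlist instead of a single page.
	 */
	struct sk_buff *skb = data;          /* buffer handed down from do_xdp_prog() */
	struct virtio_net_hdr_mrg_rxbuf *hdr;

	hdr = skb_vnet_hdr(skb);             /* header sits in the skb's headroom */
	memset(hdr, 0, vi->hdr_len);         /* zero header, leave csum to XDP layers */

	sg_init_table(sq->sg, 2);
	sg_set_buf(sq->sg, hdr, vi->hdr_len);           /* sg[0]: virtio-net header */
	skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);     /* sg[1]: packet bytes at skb->data */

	/* The skb is also the token later returned by virtqueue_get_buf(),
	 * so the reclaim loop can kfree_skb() it once the device is done.
	 */
	err = virtqueue_add_outbuf(sq->vq, sq->sg, 2, skb, GFP_ATOMIC);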
-rw-r--r--	drivers/net/virtio_net.c	112
1 file changed, 87 insertions(+), 25 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index e53365a86ca3..5deeda61d6d3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -333,9 +333,9 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 static void virtnet_xdp_xmit(struct virtnet_info *vi,
 			     struct receive_queue *rq,
 			     struct send_queue *sq,
-			     struct xdp_buff *xdp)
+			     struct xdp_buff *xdp,
+			     void *data)
 {
-	struct page *page = virt_to_head_page(xdp->data);
 	struct virtio_net_hdr_mrg_rxbuf *hdr;
 	unsigned int num_sg, len;
 	void *xdp_sent;
@@ -343,20 +343,45 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 
 	/* Free up any pending old buffers before queueing new ones. */
 	while ((xdp_sent = virtqueue_get_buf(sq->vq, &len)) != NULL) {
-		struct page *sent_page = virt_to_head_page(xdp_sent);
-		put_page(sent_page);
+		if (vi->mergeable_rx_bufs) {
+			struct page *sent_page = virt_to_head_page(xdp_sent);
+
+			put_page(sent_page);
+		} else { /* small buffer */
+			struct sk_buff *skb = xdp_sent;
+
+			kfree_skb(skb);
+		}
 	}
 
-	/* Zero header and leave csum up to XDP layers */
-	hdr = xdp->data;
-	memset(hdr, 0, vi->hdr_len);
+	if (vi->mergeable_rx_bufs) {
+		/* Zero header and leave csum up to XDP layers */
+		hdr = xdp->data;
+		memset(hdr, 0, vi->hdr_len);
+
+		num_sg = 1;
+		sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+	} else { /* small buffer */
+		struct sk_buff *skb = data;
 
-	num_sg = 1;
-	sg_init_one(sq->sg, xdp->data, xdp->data_end - xdp->data);
+		/* Zero header and leave csum up to XDP layers */
+		hdr = skb_vnet_hdr(skb);
+		memset(hdr, 0, vi->hdr_len);
+
+		num_sg = 2;
+		sg_init_table(sq->sg, 2);
+		sg_set_buf(sq->sg, hdr, vi->hdr_len);
+		skb_to_sgvec(skb, sq->sg + 1, 0, skb->len);
+	}
 	err = virtqueue_add_outbuf(sq->vq, sq->sg, num_sg,
-				   xdp->data, GFP_ATOMIC);
+				   data, GFP_ATOMIC);
 	if (unlikely(err)) {
-		put_page(page);
+		if (vi->mergeable_rx_bufs) {
+			struct page *page = virt_to_head_page(xdp->data);
+
+			put_page(page);
+		} else /* small buffer */
+			kfree_skb(data);
 		return; // On error abort to avoid unnecessary kick
 	}
 
@@ -366,23 +391,26 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
 static u32 do_xdp_prog(struct virtnet_info *vi,
 		       struct receive_queue *rq,
 		       struct bpf_prog *xdp_prog,
-		       struct page *page, int offset, int len)
+		       void *data, int len)
 {
 	int hdr_padded_len;
 	struct xdp_buff xdp;
+	void *buf;
 	unsigned int qp;
 	u32 act;
-	u8 *buf;
-
-	buf = page_address(page) + offset;
 
-	if (vi->mergeable_rx_bufs)
+	if (vi->mergeable_rx_bufs) {
 		hdr_padded_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
-	else
-		hdr_padded_len = sizeof(struct padded_vnet_hdr);
+		xdp.data = data + hdr_padded_len;
+		xdp.data_end = xdp.data + (len - vi->hdr_len);
+		buf = data;
+	} else { /* small buffers */
+		struct sk_buff *skb = data;
 
-	xdp.data = buf + hdr_padded_len;
-	xdp.data_end = xdp.data + (len - vi->hdr_len);
+		xdp.data = skb->data;
+		xdp.data_end = xdp.data + len;
+		buf = skb->data;
+	}
 
 	act = bpf_prog_run_xdp(xdp_prog, &xdp);
 	switch (act) {
@@ -392,8 +420,8 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
 		qp = vi->curr_queue_pairs -
 			vi->xdp_queue_pairs +
 			smp_processor_id();
-		xdp.data = buf + (vi->mergeable_rx_bufs ? 0 : 4);
-		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp);
+		xdp.data = buf;
+		virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
 		return XDP_TX;
 	default:
 		bpf_warn_invalid_xdp_action(act);
@@ -403,14 +431,47 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
 	}
 }
 
-static struct sk_buff *receive_small(struct virtnet_info *vi, void *buf, unsigned int len)
+static struct sk_buff *receive_small(struct net_device *dev,
+				     struct virtnet_info *vi,
+				     struct receive_queue *rq,
+				     void *buf, unsigned int len)
 {
 	struct sk_buff * skb = buf;
+	struct bpf_prog *xdp_prog;
 
 	len -= vi->hdr_len;
 	skb_trim(skb, len);
 
+	rcu_read_lock();
+	xdp_prog = rcu_dereference(rq->xdp_prog);
+	if (xdp_prog) {
+		struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
+		u32 act;
+
+		if (unlikely(hdr->hdr.gso_type || hdr->hdr.flags))
+			goto err_xdp;
+		act = do_xdp_prog(vi, rq, xdp_prog, skb, len);
+		switch (act) {
+		case XDP_PASS:
+			break;
+		case XDP_TX:
+			rcu_read_unlock();
+			goto xdp_xmit;
+		case XDP_DROP:
+		default:
+			goto err_xdp;
+		}
+	}
+	rcu_read_unlock();
+
 	return skb;
+
+err_xdp:
+	rcu_read_unlock();
+	dev->stats.rx_dropped++;
+	kfree_skb(skb);
+xdp_xmit:
+	return NULL;
 }
 
 static struct sk_buff *receive_big(struct net_device *dev,
@@ -537,7 +598,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 		if (unlikely(hdr->hdr.gso_type))
 			goto err_xdp;
 
-		act = do_xdp_prog(vi, rq, xdp_prog, xdp_page, offset, len);
+		act = do_xdp_prog(vi, rq, xdp_prog,
+				  page_address(xdp_page) + offset, len);
 		switch (act) {
 		case XDP_PASS:
 			/* We can only create skb based on xdp_page. */
@@ -672,7 +734,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
 	else if (vi->big_packets)
 		skb = receive_big(dev, vi, rq, buf, len);
 	else
-		skb = receive_small(vi, buf, len);
+		skb = receive_small(dev, vi, rq, buf, len);
 
 	if (unlikely(!skb))
 		return;