author    Michael Dalton <mwdalton@google.com>  2014-01-17 01:23:26 -0500
committer David S. Miller <davem@davemloft.net>  2014-01-17 02:46:06 -0500
commit    fb51879dbceab9c40a39018d5322451691909e15
tree      918a386730836c9704728b05cee5b8b10764a335
parent    097b4f19e508015ca65a28ea4876740d35a19eea
virtio-net: use per-receive queue page frag alloc for mergeable bufs
The virtio-net driver currently uses netdev_alloc_frag() for GFP_ATOMIC
mergeable rx buffer allocations. This commit migrates virtio-net to use
per-receive queue page frags for GFP_ATOMIC allocation. This change
unifies mergeable rx buffer memory allocation, which now will use
skb_page_frag_refill() for both atomic and GFP-WAIT buffer allocations.

To address fragmentation concerns, if after buffer allocation there is
too little space left in the page frag to allocate a subsequent buffer,
the remaining space is added to the current allocated buffer so that the
remaining space can be used to store packet data.

Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael Dalton <mwdalton@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
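[Editor's note: the following is a minimal userspace C sketch of the
hole-absorption idea described above, not kernel code. The frag_pool
struct, frag_alloc() helper, and FRAG_PAGE_SIZE constant are hypothetical
stand-ins for the kernel's struct page_frag, skb_page_frag_refill(), and
page-sized frag backing; only the offset/hole arithmetic mirrors the
patch. The real driver refcounts the page with get_page()/put_page()
rather than freeing it.]

#include <stdio.h>
#include <stdlib.h>

#define FRAG_PAGE_SIZE   4096u  /* stand-in for the frag's backing size */
#define MERGE_BUFFER_LEN 1536u  /* illustrative rx buffer size */

struct frag_pool {
        char *page;          /* backing allocation */
        unsigned int size;   /* total size of the backing page */
        unsigned int offset; /* next free byte */
};

/* Hand out one buffer; absorb a trailing hole too small to reuse. */
static char *frag_alloc(struct frag_pool *p, unsigned int *len)
{
        char *buf;
        unsigned int hole;

        if (!p->page || p->offset + MERGE_BUFFER_LEN > p->size) {
                free(p->page); /* sketch only; real code refcounts */
                p->page = malloc(FRAG_PAGE_SIZE);
                if (!p->page)
                        return NULL;
                p->size = FRAG_PAGE_SIZE;
                p->offset = 0;
        }

        buf = p->page + p->offset;
        *len = MERGE_BUFFER_LEN;
        p->offset += *len;

        hole = p->size - p->offset;
        if (hole < MERGE_BUFFER_LEN) {
                /* Too small for another buffer: give it to this one,
                 * so the leftover space still holds packet data. */
                *len += hole;
                p->offset += hole;
        }
        return buf;
}

int main(void)
{
        struct frag_pool pool = { 0 };
        unsigned int len;
        int i;

        for (i = 0; i < 4; i++) {
                if (!frag_alloc(&pool, &len))
                        return 1;
                printf("buf %d: len=%u (pool offset now %u)\n",
                       i, len, pool.offset);
        }
        free(pool.page);
        return 0;
}

With a 4096-byte page and 1536-byte buffers, the second buffer absorbs
the 1024-byte hole (len becomes 2560) and the third allocation triggers
a page refill, which is the behavior the patch adds.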
Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 69
1 file changed, 35 insertions(+), 34 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bd70aa87bf7..5ee71dccd092 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -75,6 +75,9 @@ struct receive_queue {
         /* Chain pages by the private ptr. */
         struct page *pages;
 
+        /* Page frag for packet buffer allocation. */
+        struct page_frag alloc_frag;
+
         /* RX: fragments + linear part + virtio header */
         struct scatterlist sg[MAX_SKB_FRAGS + 2];
 
@@ -123,11 +126,6 @@ struct virtnet_info {
         /* Lock for config space updates */
         struct mutex config_lock;
 
-        /* Page_frag for GFP_KERNEL packet buffer allocation when we run
-         * low on memory.
-         */
-        struct page_frag alloc_frag;
-
         /* Does the affinity hint is set for virtqueues? */
         bool affinity_hint_set;
 
@@ -333,8 +331,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
         int num_buf = hdr->mhdr.num_buffers;
         struct page *page = virt_to_head_page(buf);
         int offset = buf - page_address(page);
-        struct sk_buff *head_skb = page_to_skb(rq, page, offset, len,
-                                               MERGE_BUFFER_LEN);
+        unsigned int truesize = max_t(unsigned int, len, MERGE_BUFFER_LEN);
+        struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
         struct sk_buff *curr_skb = head_skb;
 
         if (unlikely(!curr_skb))
@@ -350,11 +348,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         dev->stats.rx_length_errors++;
                         goto err_buf;
                 }
-                if (unlikely(len > MERGE_BUFFER_LEN)) {
-                        pr_debug("%s: rx error: merge buffer too long\n",
-                                 dev->name);
-                        len = MERGE_BUFFER_LEN;
-                }
 
                 page = virt_to_head_page(buf);
 
@@ -372,19 +365,20 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
                         head_skb->truesize += nskb->truesize;
                         num_skb_frags = 0;
                 }
+                truesize = max_t(unsigned int, len, MERGE_BUFFER_LEN);
                 if (curr_skb != head_skb) {
                         head_skb->data_len += len;
                         head_skb->len += len;
-                        head_skb->truesize += MERGE_BUFFER_LEN;
+                        head_skb->truesize += truesize;
                 }
                 offset = buf - page_address(page);
                 if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) {
                         put_page(page);
                         skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1,
-                                             len, MERGE_BUFFER_LEN);
+                                             len, truesize);
                 } else {
                         skb_add_rx_frag(curr_skb, num_skb_frags, page,
-                                        offset, len, MERGE_BUFFER_LEN);
+                                        offset, len, truesize);
                 }
         }
 
@@ -573,25 +567,24 @@ static int add_recvbuf_big(struct receive_queue *rq, gfp_t gfp)
 
 static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
 {
-        struct virtnet_info *vi = rq->vq->vdev->priv;
-        char *buf = NULL;
+        struct page_frag *alloc_frag = &rq->alloc_frag;
+        char *buf;
         int err;
+        unsigned int len, hole;
 
-        if (gfp & __GFP_WAIT) {
-                if (skb_page_frag_refill(MERGE_BUFFER_LEN, &vi->alloc_frag,
-                                         gfp)) {
-                        buf = (char *)page_address(vi->alloc_frag.page) +
-                              vi->alloc_frag.offset;
-                        get_page(vi->alloc_frag.page);
-                        vi->alloc_frag.offset += MERGE_BUFFER_LEN;
-                }
-        } else {
-                buf = netdev_alloc_frag(MERGE_BUFFER_LEN);
-        }
-        if (!buf)
+        if (unlikely(!skb_page_frag_refill(MERGE_BUFFER_LEN, alloc_frag, gfp)))
                 return -ENOMEM;
+        buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
+        get_page(alloc_frag->page);
+        len = MERGE_BUFFER_LEN;
+        alloc_frag->offset += len;
+        hole = alloc_frag->size - alloc_frag->offset;
+        if (hole < MERGE_BUFFER_LEN) {
+                len += hole;
+                alloc_frag->offset += hole;
+        }
 
-        sg_init_one(rq->sg, buf, MERGE_BUFFER_LEN);
+        sg_init_one(rq->sg, buf, len);
         err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, buf, gfp);
         if (err < 0)
                 put_page(virt_to_head_page(buf));
@@ -612,6 +605,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
         int err;
         bool oom;
 
+        gfp |= __GFP_COLD;
         do {
                 if (vi->mergeable_rx_bufs)
                         err = add_recvbuf_mergeable(rq, gfp);
@@ -1368,6 +1362,14 @@ static void free_receive_bufs(struct virtnet_info *vi)
         }
 }
 
+static void free_receive_page_frags(struct virtnet_info *vi)
+{
+        int i;
+        for (i = 0; i < vi->max_queue_pairs; i++)
+                if (vi->rq[i].alloc_frag.page)
+                        put_page(vi->rq[i].alloc_frag.page);
+}
+
 static void free_unused_bufs(struct virtnet_info *vi)
 {
         void *buf;
@@ -1695,9 +1697,8 @@ free_recv_bufs:
         unregister_netdev(dev);
 free_vqs:
         cancel_delayed_work_sync(&vi->refill);
+        free_receive_page_frags(vi);
         virtnet_del_vqs(vi);
-        if (vi->alloc_frag.page)
-                put_page(vi->alloc_frag.page);
 free_stats:
         free_percpu(vi->stats);
 free:
@@ -1714,6 +1715,8 @@ static void remove_vq_common(struct virtnet_info *vi)
 
         free_receive_bufs(vi);
 
+        free_receive_page_frags(vi);
+
         virtnet_del_vqs(vi);
 }
 
@@ -1731,8 +1734,6 @@ static void virtnet_remove(struct virtio_device *vdev)
         unregister_netdev(vi->dev);
 
         remove_vq_common(vi);
-        if (vi->alloc_frag.page)
-                put_page(vi->alloc_frag.page);
 
         flush_work(&vi->config_work);
 