author     Rusty Russell <rusty@rustcorp.com.au>  2008-07-25 13:06:01 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>  2008-07-24 22:06:02 -0400
commit     fb6813f480806d62361719e84777c8e00d3e86a8
tree       800f68b515a39b554ed3a852ba36ea54c1beef8c
parent     97402b96f87c6e32f75f1bffdd91a5ee144b679d
virtio: Recycle unused recv buffer pages for large skbs in net driver
If we hack the virtio_net driver to always allocate full-sized (64k+)
skbuffs, the driver slows down (lguest numbers):
Time to receive 1GB (small buffers): 10.85 seconds
Time to receive 1GB (64k+ buffers): 24.75 seconds
Of course, large buffers use up more space in the ring, so we increase
that from 128 to 2048:
Time to receive 1GB (64k+ buffers, 2k ring): 16.61 seconds
If we recycle pages rather than using alloc_page/free_page:
Time to receive 1GB (64k+ buffers, 2k ring, recycle pages): 10.81 seconds
This demonstrates that with efficient allocation, we don't need to
have a separate "small buffer" queue.
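
For reference, the scheme being added below is just an intrusive LIFO free list: unused pages are chained together through page->private, and the driver pops from that list before falling back to alloc_page(). Here is a minimal userspace sketch of the same pattern, not the kernel code itself; all names in it (struct buf, buf_pool, give_a_buf, get_a_buf) are made up for illustration, and malloc()/free() stand in for alloc_page()/__free_pages().

/* Sketch: recycle objects through an intrusive LIFO free list. */
#include <stdio.h>
#include <stdlib.h>

struct buf {
	struct buf *next;       /* plays the role of page->private */
	char data[4096];
};

static struct buf *buf_pool;    /* plays the role of vi->pages */

static void give_a_buf(struct buf *b)
{
	/* "Free" is a pointer push onto the list. */
	b->next = buf_pool;
	buf_pool = b;
}

static struct buf *get_a_buf(void)
{
	struct buf *b = buf_pool;

	if (b)
		buf_pool = b->next;     /* pop a recycled buffer */
	else
		b = malloc(sizeof(*b)); /* fall back to the real allocator */
	return b;
}

int main(void)
{
	struct buf *a = get_a_buf();    /* pool empty: falls back to malloc */
	struct buf *b;

	give_a_buf(a);                  /* recycle instead of freeing */
	b = get_a_buf();                /* pops the same buffer back */
	printf("recycled: %s\n", b == a ? "yes" : "no");

	give_a_buf(b);
	while (buf_pool)                /* drain on teardown, as virtnet_remove() does */
		free(get_a_buf());
	return 0;
}

The speedup measured above comes from replacing an alloc_page()/free_page() round trip per fragment with two pointer assignments on the hot path; the list costs nothing when empty and is drained only at device removal.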
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'drivers/net/virtio_net.c')
 drivers/net/virtio_net.c | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0886b8a2d92d..0196a0df9021 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -61,6 +61,9 @@ struct virtnet_info
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
+
+	/* Chain pages by the private ptr. */
+	struct page *pages;
 };
 
 static inline struct virtio_net_hdr *skb_vnet_hdr(struct sk_buff *skb)
@@ -73,6 +76,23 @@ static inline void vnet_hdr_to_sg(struct scatterlist *sg, struct sk_buff *skb)
 	sg_init_one(sg, skb_vnet_hdr(skb), sizeof(struct virtio_net_hdr));
 }
 
+static void give_a_page(struct virtnet_info *vi, struct page *page)
+{
+	page->private = (unsigned long)vi->pages;
+	vi->pages = page;
+}
+
+static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
+{
+	struct page *p = vi->pages;
+
+	if (p)
+		vi->pages = (struct page *)p->private;
+	else
+		p = alloc_page(gfp_mask);
+	return p;
+}
+
 static void skb_xmit_done(struct virtqueue *svq)
 {
 	struct virtnet_info *vi = svq->vdev->priv;
@@ -101,6 +121,15 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	}
 	len -= sizeof(struct virtio_net_hdr);
 
+	if (len <= MAX_PACKET_LEN) {
+		unsigned int i;
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+			give_a_page(dev->priv, skb_shinfo(skb)->frags[i].page);
+		skb->data_len = 0;
+		skb_shinfo(skb)->nr_frags = 0;
+	}
+
 	err = pskb_trim(skb, len);
 	if (err) {
 		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
@@ -183,7 +212,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (vi->big_packets) {
 		for (i = 0; i < MAX_SKB_FRAGS; i++) {
 			skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-			f->page = alloc_page(GFP_ATOMIC);
+			f->page = get_a_page(vi, GFP_ATOMIC);
 			if (!f->page)
 				break;
 
@@ -506,6 +535,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->dev = dev;
 	vi->vdev = vdev;
 	vdev->priv = vi;
+	vi->pages = NULL;
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -591,6 +621,10 @@ static void virtnet_remove(struct virtio_device *vdev)
 	vdev->config->del_vq(vi->svq);
 	vdev->config->del_vq(vi->rvq);
 	unregister_netdev(vi->dev);
+
+	while (vi->pages)
+		__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+
 	free_netdev(vi->dev);
 }
 