author     Herbert Xu <herbert@gondor.apana.org.au>  2008-04-17 23:24:27 -0400
committer  Rusty Russell <rusty@rustcorp.com.au>     2008-07-24 22:06:01 -0400
commit     97402b96f87c6e32f75f1bffdd91a5ee144b679d (patch)
tree       7135f703666079dbc15fc972941179f05bd63972 /drivers
parent     a9ea3fc6f2654a7407864fec983d1671d775b5ee (diff)
virtio net: Allow receiving SG packets
Finally this patch lets virtio_net receive GSO packets in addition to
sending them.

This can definitely be optimised for the non-GSO case. For comparison,
the Xen approach stores one page in each skb and uses subsequent skbs'
pages to construct an SG skb instead of preallocating the maximum
amount of pages per skb.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au> (added feature bits)
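In short, the patch makes try_fill_recv() hang up to MAX_SKB_FRAGS whole pages
off each receive skb whenever any guest GSO feature was negotiated, so the host
can scatter a large packet across them; receive_skb() then trims the skb down
to the length actually received. A condensed sketch of that buffer setup,
using the 2.6.26-era skb_frag_t fields (page, page_offset, size) exactly as
they appear in the hunk below; the helper name fill_big_recv_skb() is
illustrative and not part of the patch:

#include <linux/skbuff.h>
#include <linux/gfp.h>

/* Sketch only: attach up to MAX_SKB_FRAGS freshly allocated pages to a
 * receive skb so a GSO-sized packet can land across them.  Mirrors the
 * try_fill_recv() hunk below; a partial fill on allocation failure is
 * tolerated, the resulting scatterlist is simply shorter. */
static void fill_big_recv_skb(struct sk_buff *skb)
{
	int i;

	for (i = 0; i < MAX_SKB_FRAGS; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		f->page = alloc_page(GFP_ATOMIC);
		if (!f->page)
			break;

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		/* Account the page in the skb so skb_to_sgvec() maps it. */
		skb->data_len += PAGE_SIZE;
		skb->len += PAGE_SIZE;
		skb_shinfo(skb)->nr_frags++;
	}
}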
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/net/virtio_net.c  44
1 file changed, 39 insertions, 5 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index ce37a7e9541c..0886b8a2d92d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -55,6 +55,9 @@ struct virtnet_info
 	struct tasklet_struct tasklet;
 	bool free_in_tasklet;
 
+	/* I like... big packets and I cannot lie! */
+	bool big_packets;
+
 	/* Receive & send queues. */
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
@@ -89,6 +92,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	int err;
 
 	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
 		pr_debug("%s: short packet %i\n", dev->name, len);
@@ -96,10 +100,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 		goto drop;
 	}
 	len -= sizeof(struct virtio_net_hdr);
-	BUG_ON(len > MAX_PACKET_LEN);
-
-	skb_trim(skb, len);
 
+	err = pskb_trim(skb, len);
+	if (err) {
+		pr_debug("%s: pskb_trim failed %i %d\n", dev->name, len, err);
+		dev->stats.rx_dropped++;
+		goto drop;
+	}
+	skb->truesize += skb->data_len;
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
@@ -161,7 +169,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	int num, err;
+	int num, err, i;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
@@ -171,6 +179,24 @@ static void try_fill_recv(struct virtnet_info *vi)
 
 		skb_put(skb, MAX_PACKET_LEN);
 		vnet_hdr_to_sg(sg, skb);
+
+		if (vi->big_packets) {
+			for (i = 0; i < MAX_SKB_FRAGS; i++) {
+				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
+				f->page = alloc_page(GFP_ATOMIC);
+				if (!f->page)
+					break;
+
+				f->page_offset = 0;
+				f->size = PAGE_SIZE;
+
+				skb->data_len += PAGE_SIZE;
+				skb->len += PAGE_SIZE;
+
+				skb_shinfo(skb)->nr_frags++;
+			}
+		}
+
 		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
 		skb_queue_head(&vi->recv, skb);
 
@@ -485,6 +511,12 @@ static int virtnet_probe(struct virtio_device *vdev)
 	 * the timer. */
 	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
 
+	/* If we can receive ANY GSO packets, we must allocate large ones. */
+	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
+	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
+	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+		vi->big_packets = true;
+
 	/* We expect two virtqueues, receive then send. */
 	vi->rvq = vdev->config->find_vq(vdev, 0, skb_recv_done);
 	if (IS_ERR(vi->rvq)) {
@@ -571,7 +603,9 @@ static unsigned int features[] = {
 	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
 	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
 	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
-	VIRTIO_NET_F_HOST_ECN, VIRTIO_F_NOTIFY_ON_EMPTY,
+	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
+	VIRTIO_NET_F_GUEST_ECN, /* We don't yet handle UFO input. */
+	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {