-rw-r--r--  drivers/net/virtio_net.c  229
1 file changed, 88 insertions, 141 deletions
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 5c498d2b043f..d445845f2779 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1,4 +1,4 @@
-/* A simple network driver using virtio.
+/* A network driver using virtio.
  *
  * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
  *
@@ -48,19 +48,9 @@ struct virtnet_info
 	struct napi_struct napi;
 	unsigned int status;
 
-	/* The skb we couldn't send because buffers were full. */
-	struct sk_buff *last_xmit_skb;
-
-	/* If we need to free in a timer, this is it. */
-	struct timer_list xmit_free_timer;
-
 	/* Number of input buffers, and max we've ever had. */
 	unsigned int num, max;
 
-	/* For cleaning up after transmission. */
-	struct tasklet_struct tasklet;
-	bool free_in_tasklet;
-
 	/* I like... big packets and I cannot lie! */
 	bool big_packets;
 
@@ -78,9 +68,17 @@ struct virtnet_info
 	struct page *pages;
 };
 
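+/* Per-skb metadata, kept in skb->cb: the virtio header (the legacy and
+ * mergeable layouts share a common prefix, hence the union) plus the
+ * scatterlist entry count recorded at transmit time. */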
-static inline void *skb_vnet_hdr(struct sk_buff *skb)
+struct skb_vnet_hdr {
+	union {
+		struct virtio_net_hdr hdr;
+		struct virtio_net_hdr_mrg_rxbuf mhdr;
+	};
+	unsigned int num_sg;
+};
+
+static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
-	return (struct virtio_net_hdr *)skb->cb;
+	return (struct skb_vnet_hdr *)skb->cb;
 }
 
 static void give_a_page(struct virtnet_info *vi, struct page *page)
@@ -119,17 +117,13 @@ static void skb_xmit_done(struct virtqueue *svq)
 
 	/* We were probably waiting for more output buffers. */
 	netif_wake_queue(vi->dev);
-
-	/* Make sure we re-xmit last_xmit_skb: if there are no more packets
-	 * queued, start_xmit won't be called. */
-	tasklet_schedule(&vi->tasklet);
 }
 
 static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			unsigned len)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	int err;
 	int i;
 
@@ -140,7 +134,6 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	}
 
 	if (vi->mergeable_rx_bufs) {
-		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
 		unsigned int copy;
 		char *p = page_address(skb_shinfo(skb)->frags[0].page);
 
@@ -148,8 +141,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			len = PAGE_SIZE;
 		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
 
-		memcpy(hdr, p, sizeof(*mhdr));
-		p += sizeof(*mhdr);
+		memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
+		p += sizeof(hdr->mhdr);
 
 		copy = len;
 		if (copy > skb_tailroom(skb))
@@ -164,13 +157,13 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			skb_shinfo(skb)->nr_frags--;
 		} else {
 			skb_shinfo(skb)->frags[0].page_offset +=
-				sizeof(*mhdr) + copy;
+				sizeof(hdr->mhdr) + copy;
 			skb_shinfo(skb)->frags[0].size = len;
 			skb->data_len += len;
 			skb->len += len;
 		}
 
-		while (--mhdr->num_buffers) {
+		while (--hdr->mhdr.num_buffers) {
 			struct sk_buff *nskb;
 
 			i = skb_shinfo(skb)->nr_frags;
@@ -184,7 +177,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
 			if (!nskb) {
 				pr_debug("%s: rx error: %d buffers missing\n",
-					 dev->name, mhdr->num_buffers);
+					 dev->name, hdr->mhdr.num_buffers);
 				dev->stats.rx_length_errors++;
 				goto drop;
 			}
@@ -205,7 +198,7 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 			skb->len += len;
 		}
 	} else {
-		len -= sizeof(struct virtio_net_hdr);
+		len -= sizeof(hdr->hdr);
 
 		if (len <= MAX_PACKET_LEN)
 			trim_pages(vi, skb);
@@ -223,9 +216,11 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	dev->stats.rx_bytes += skb->len;
 	dev->stats.rx_packets++;
 
-	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+	if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 		pr_debug("Needs csum!\n");
-		if (!skb_partial_csum_set(skb,hdr->csum_start,hdr->csum_offset))
+		if (!skb_partial_csum_set(skb,
+					  hdr->hdr.csum_start,
+					  hdr->hdr.csum_offset))
 			goto frame_err;
 	}
 
@@ -233,9 +228,9 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
 		 ntohs(skb->protocol), skb->len, skb->pkt_type);
 
-	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+	if (hdr->hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 		pr_debug("GSO!\n");
-		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+		switch (hdr->hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 		case VIRTIO_NET_HDR_GSO_TCPV4:
 			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 			break;
@@ -248,14 +243,14 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 		default:
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: bad gso type %u.\n",
-				       dev->name, hdr->gso_type);
+				       dev->name, hdr->hdr.gso_type);
 			goto frame_err;
 		}
 
-		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+		if (hdr->hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 
-		skb_shinfo(skb)->gso_size = hdr->gso_size;
+		skb_shinfo(skb)->gso_size = hdr->hdr.gso_size;
 		if (skb_shinfo(skb)->gso_size == 0) {
 			if (net_ratelimit())
 				printk(KERN_WARNING "%s: zero gso size.\n",
@@ -285,8 +280,8 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
-	for (;;) {
-		struct virtio_net_hdr *hdr;
+	do {
+		struct skb_vnet_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
 		if (unlikely(!skb)) {
@@ -298,7 +293,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 		skb_put(skb, MAX_PACKET_LEN);
 
 		hdr = skb_vnet_hdr(skb);
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
@@ -328,7 +323,7 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 			break;
 		}
 		vi->num++;
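+		/* err holds add_buf()'s result, the ring capacity left, so
+		 * keep refilling while another num-entry buffer still fits. */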
-	}
+	} while (err >= num);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -346,7 +341,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 	if (!vi->mergeable_rx_bufs)
 		return try_fill_recv_maxbufs(vi, gfp);
 
-	for (;;) {
+	do {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
@@ -380,7 +375,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 			break;
 		}
 		vi->num++;
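+		/* Mergeable buffers take a single ring entry each, so refill
+		 * until no capacity is left. */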
-	}
+	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
@@ -448,42 +443,26 @@ again:
 	return received;
 }
 
-static void free_old_xmit_skbs(struct virtnet_info *vi)
+static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
 	struct sk_buff *skb;
-	unsigned int len;
+	unsigned int len, tot_sgs = 0;
 
 	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
 		pr_debug("Sent skb %p\n", skb);
 		__skb_unlink(skb, &vi->send);
 		vi->dev->stats.tx_bytes += skb->len;
 		vi->dev->stats.tx_packets++;
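+		/* num_sg was stashed by xmit_skb(); the running total tells
+		 * the caller how many ring entries were just reclaimed. */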
+		tot_sgs += skb_vnet_hdr(skb)->num_sg;
 		kfree_skb(skb);
 	}
-}
-
-/* If the virtio transport doesn't always notify us when all in-flight packets
- * are consumed, we fall back to using this function on a timer to free them. */
-static void xmit_free(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock(vi->dev);
-
-	free_old_xmit_skbs(vi);
-
-	if (!skb_queue_empty(&vi->send))
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	netif_tx_unlock(vi->dev);
+	return tot_sgs;
 }
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-	int num, err;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
-	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
-	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
+	struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
 	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
@@ -491,108 +470,89 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
-		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-		hdr->csum_start = skb->csum_start - skb_headroom(skb);
-		hdr->csum_offset = skb->csum_offset;
+		hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+		hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+		hdr->hdr.csum_offset = skb->csum_offset;
 	} else {
-		hdr->flags = 0;
-		hdr->csum_offset = hdr->csum_start = 0;
+		hdr->hdr.flags = 0;
+		hdr->hdr.csum_offset = hdr->hdr.csum_start = 0;
 	}
 
 	if (skb_is_gso(skb)) {
-		hdr->hdr_len = skb_headlen(skb);
-		hdr->gso_size = skb_shinfo(skb)->gso_size;
+		hdr->hdr.hdr_len = skb_headlen(skb);
+		hdr->hdr.gso_size = skb_shinfo(skb)->gso_size;
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
-			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+			hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
 		else
 			BUG();
 		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
-			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+			hdr->hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 	} else {
-		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
-		hdr->gso_size = hdr->hdr_len = 0;
+		hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
+		hdr->hdr.gso_size = hdr->hdr.hdr_len = 0;
 	}
 
-	mhdr->num_buffers = 0;
+	hdr->mhdr.num_buffers = 0;
 
 	/* Encode metadata header at front. */
 	if (vi->mergeable_rx_bufs)
-		sg_set_buf(sg, mhdr, sizeof(*mhdr));
+		sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
 	else
-		sg_set_buf(sg, hdr, sizeof(*hdr));
+		sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
 
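+	/* One slot for the header plus one per fragment; remember the count
+	 * so free_old_xmit_skbs() can report reclaimed capacity. */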
-	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-
-	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
-	if (err >= 0 && !vi->free_in_tasklet)
-		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
-
-	return err;
-}
-
-static void xmit_tasklet(unsigned long data)
-{
-	struct virtnet_info *vi = (void *)data;
-
-	netif_tx_lock_bh(vi->dev);
-	if (vi->last_xmit_skb && xmit_skb(vi, vi->last_xmit_skb) >= 0) {
-		vi->svq->vq_ops->kick(vi->svq);
-		vi->last_xmit_skb = NULL;
-	}
-	if (vi->free_in_tasklet)
-		free_old_xmit_skbs(vi);
-	netif_tx_unlock_bh(vi->dev);
+	hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
+	return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct virtnet_info *vi = netdev_priv(dev);
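+	/* Ring entries still free after queueing this skb, as reported by
+	 * add_buf() via xmit_skb(); negative if the skb could not be added. */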
+	int capacity;
 
 again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
-	/* If we has a buffer left over from last time, send it now. */
-	if (unlikely(vi->last_xmit_skb) &&
-	    xmit_skb(vi, vi->last_xmit_skb) < 0)
-		goto stop_queue;
-
-	vi->last_xmit_skb = NULL;
-
 	/* Put new one in send queue and do transmit */
-	if (likely(skb)) {
-		__skb_queue_head(&vi->send, skb);
-		if (xmit_skb(vi, skb) < 0) {
-			vi->last_xmit_skb = skb;
-			skb = NULL;
-			goto stop_queue;
-		}
-	}
-done:
-	vi->svq->vq_ops->kick(vi->svq);
-	return NETDEV_TX_OK;
-
-stop_queue:
-	pr_debug("%s: virtio not prepared to send\n", dev->name);
-	netif_stop_queue(dev);
-
-	/* Activate callback for using skbs: if this returns false it
-	 * means some were used in the meantime. */
+	__skb_queue_head(&vi->send, skb);
+	capacity = xmit_skb(vi, skb);
+
+	/* This can happen with OOM and indirect buffers. */
+	if (unlikely(capacity < 0)) {
+		netif_stop_queue(dev);
+		dev_warn(&dev->dev, "Unexpected full queue\n");
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			vi->svq->vq_ops->disable_cb(vi->svq);
+			netif_start_queue(dev);
+			goto again;
+		}
+		return NETDEV_TX_BUSY;
+	}
+
+	vi->svq->vq_ops->kick(vi->svq);
+	/* Don't wait up for transmitted skbs to be freed. */
+	skb_orphan(skb);
+	nf_reset(skb);
+
+	/* Apparently nice girls don't return TX_BUSY; stop the queue
+	 * before it gets out of hand. Naturally, this wastes entries. */
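+	/* 2+MAX_SKB_FRAGS is the most entries a single skb can need. */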
-	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-		vi->svq->vq_ops->disable_cb(vi->svq);
-		netif_start_queue(dev);
-		goto again;
-	}
-	if (skb) {
-		/* Drop this skb: we only queue one. */
-		vi->dev->stats.tx_dropped++;
-		kfree_skb(skb);
-	}
-	goto done;
+	if (capacity < 2+MAX_SKB_FRAGS) {
+		netif_stop_queue(dev);
+		if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+			/* More just got used, free them then recheck. */
+			capacity += free_old_xmit_skbs(vi);
+			if (capacity >= 2+MAX_SKB_FRAGS) {
+				netif_start_queue(dev);
+				vi->svq->vq_ops->disable_cb(vi->svq);
+			}
+		}
+	}
+
+	return NETDEV_TX_OK;
 }
 
 static int virtnet_set_mac_address(struct net_device *dev, void *p)
@@ -925,10 +885,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->pages = NULL;
 	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
-	/* If they give us a callback when all buffers are done, we don't need
-	 * the timer. */
-	vi->free_in_tasklet = virtio_has_feature(vdev,VIRTIO_F_NOTIFY_ON_EMPTY);
-
 	/* If we can receive ANY GSO packets, we must allocate large ones. */
 	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
 	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
@@ -960,11 +916,6 @@ static int virtnet_probe(struct virtio_device *vdev)
 	skb_queue_head_init(&vi->recv);
 	skb_queue_head_init(&vi->send);
 
-	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);
-
-	if (!vi->free_in_tasklet)
-		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);
-
 	err = register_netdev(dev);
 	if (err) {
 		pr_debug("virtio_net: registering device failed\n");
@@ -1005,9 +956,6 @@ static void virtnet_remove(struct virtio_device *vdev)
 	/* Stop all the virtqueues. */
 	vdev->config->reset(vdev);
 
-	if (!vi->free_in_tasklet)
-		del_timer_sync(&vi->xmit_free_timer);
-
 	/* Free our skbs in send and recv queues, if any. */
 	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
 		kfree_skb(skb);
@@ -1041,7 +989,6 @@ static unsigned int features[] = {
 	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
 	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
 	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
-	VIRTIO_F_NOTIFY_ON_EMPTY,
 };
 
 static struct virtio_driver virtio_net = {