author		Michael S. Tsirkin <mst@redhat.com>	2010-04-12 09:19:04 -0400
committer	Rusty Russell <rusty@rustcorp.com.au>	2010-05-19 08:45:43 -0400
commit		1915a712f210f0b63d10bc4f875e8e66aac7a2c4 (patch)
tree		33057312b307a7076e2a4b43c9e26222328d8520 /drivers/net
parent		09ec6b69d2b97d6fca16cfe91b4634506f4db0a7 (diff)
virtio_net: use virtqueue_xxx wrappers
Switch virtio_net to new virtqueue_xxx wrappers.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
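
For context, the virtqueue_xxx wrappers that this conversion targets were added earlier in the same series as thin forwarders around the old vq_ops indirection. The sketch below is a hypothetical reconstruction of that transitional shape (not necessarily the exact upstream definitions), covering only the operations this patch touches:

    /* Plausible transitional wrappers: each virtqueue_xxx() simply
     * forwards to the corresponding vq_ops method, so converted call
     * sites like the ones in this patch behave identically while the
     * ->vq_ops indirection is phased out of the drivers. */
    static inline int virtqueue_add_buf(struct virtqueue *vq,
                                        struct scatterlist sg[],
                                        unsigned int out_num,
                                        unsigned int in_num,
                                        void *data)
    {
        return vq->vq_ops->add_buf(vq, sg, out_num, in_num, data);
    }

    static inline void virtqueue_kick(struct virtqueue *vq)
    {
        vq->vq_ops->kick(vq);
    }

    static inline void *virtqueue_get_buf(struct virtqueue *vq,
                                          unsigned int *len)
    {
        return vq->vq_ops->get_buf(vq, len);
    }

    static inline void virtqueue_disable_cb(struct virtqueue *vq)
    {
        vq->vq_ops->disable_cb(vq);
    }

    static inline bool virtqueue_enable_cb(struct virtqueue *vq)
    {
        return vq->vq_ops->enable_cb(vq);
    }

    static inline void *virtqueue_detach_unused_buf(struct virtqueue *vq)
    {
        return vq->vq_ops->detach_unused_buf(vq);
    }

The point of routing every driver through such wrappers is that, once all call sites are converted, the vq_ops table itself can be dropped in a follow-up patch without touching the drivers again.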
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/virtio_net.c	46
1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42d..91738d83dbdc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -119,7 +119,7 @@ static void skb_xmit_done(struct virtqueue *svq)
     struct virtnet_info *vi = svq->vdev->priv;
 
     /* Suppress further interrupts. */
-    svq->vq_ops->disable_cb(svq);
+    virtqueue_disable_cb(svq);
 
     /* We were probably waiting for more output buffers. */
     netif_wake_queue(vi->dev);
@@ -207,7 +207,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
             return -EINVAL;
         }
 
-        page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+        page = virtqueue_get_buf(vi->rvq, &len);
         if (!page) {
             pr_debug("%s: rx error: %d buffers missing\n",
                      skb->dev->name, hdr->mhdr.num_buffers);
@@ -339,7 +339,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 
     skb_to_sgvec(skb, sg + 1, 0, skb->len);
 
-    err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+    err = virtqueue_add_buf(vi->rvq, sg, 0, 2, skb);
     if (err < 0)
         dev_kfree_skb(skb);
 
@@ -386,7 +386,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
 
     /* chain first in list head */
     first->private = (unsigned long)list;
-    err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+    err = virtqueue_add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
                    first);
     if (err < 0)
         give_pages(vi, first);
@@ -406,7 +406,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 
     sg_init_one(&sg, page_address(page), PAGE_SIZE);
 
-    err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+    err = virtqueue_add_buf(vi->rvq, &sg, 0, 1, page);
     if (err < 0)
         give_pages(vi, page);
 
@@ -435,7 +435,7 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
     } while (err > 0);
     if (unlikely(vi->num > vi->max))
         vi->max = vi->num;
-    vi->rvq->vq_ops->kick(vi->rvq);
+    virtqueue_kick(vi->rvq);
     return !oom;
 }
 
@@ -444,7 +444,7 @@ static void skb_recv_done(struct virtqueue *rvq)
     struct virtnet_info *vi = rvq->vdev->priv;
     /* Schedule NAPI, Suppress further interrupts if successful. */
     if (napi_schedule_prep(&vi->napi)) {
-        rvq->vq_ops->disable_cb(rvq);
+        virtqueue_disable_cb(rvq);
         __napi_schedule(&vi->napi);
     }
 }
@@ -473,7 +473,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
 
 again:
     while (received < budget &&
-           (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+           (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
         receive_buf(vi->dev, buf, len);
         --vi->num;
         received++;
@@ -487,9 +487,9 @@ again:
     /* Out of packets? */
     if (received < budget) {
         napi_complete(napi);
-        if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq)) &&
+        if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
             napi_schedule_prep(napi)) {
-            vi->rvq->vq_ops->disable_cb(vi->rvq);
+            virtqueue_disable_cb(vi->rvq);
             __napi_schedule(napi);
             goto again;
         }
@@ -503,7 +503,7 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
     struct sk_buff *skb;
     unsigned int len, tot_sgs = 0;
 
-    while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+    while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
         pr_debug("Sent skb %p\n", skb);
         vi->dev->stats.tx_bytes += skb->len;
         vi->dev->stats.tx_packets++;
@@ -559,7 +559,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
     sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
 
     hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-    return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+    return virtqueue_add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -578,14 +578,14 @@ again:
     if (unlikely(capacity < 0)) {
         netif_stop_queue(dev);
         dev_warn(&dev->dev, "Unexpected full queue\n");
-        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-            vi->svq->vq_ops->disable_cb(vi->svq);
+        if (unlikely(!virtqueue_enable_cb(vi->svq))) {
+            virtqueue_disable_cb(vi->svq);
             netif_start_queue(dev);
             goto again;
         }
         return NETDEV_TX_BUSY;
     }
-    vi->svq->vq_ops->kick(vi->svq);
+    virtqueue_kick(vi->svq);
 
     /* Don't wait up for transmitted skbs to be freed. */
     skb_orphan(skb);
@@ -595,12 +595,12 @@ again:
      * before it gets out of hand. Naturally, this wastes entries. */
     if (capacity < 2+MAX_SKB_FRAGS) {
         netif_stop_queue(dev);
-        if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+        if (unlikely(!virtqueue_enable_cb(vi->svq))) {
             /* More just got used, free them then recheck. */
             capacity += free_old_xmit_skbs(vi);
             if (capacity >= 2+MAX_SKB_FRAGS) {
                 netif_start_queue(dev);
-                vi->svq->vq_ops->disable_cb(vi->svq);
+                virtqueue_disable_cb(vi->svq);
             }
         }
     }
@@ -645,7 +645,7 @@ static int virtnet_open(struct net_device *dev)
      * now. virtnet_poll wants re-enable the queue, so we disable here.
      * We synchronize against interrupts via NAPI_STATE_SCHED */
     if (napi_schedule_prep(&vi->napi)) {
-        vi->rvq->vq_ops->disable_cb(vi->rvq);
+        virtqueue_disable_cb(vi->rvq);
         __napi_schedule(&vi->napi);
     }
     return 0;
@@ -682,15 +682,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
         sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
     sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-    BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+    BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
 
-    vi->cvq->vq_ops->kick(vi->cvq);
+    virtqueue_kick(vi->cvq);
 
     /*
     * Spin for a response, the kick causes an ioport write, trapping
     * into the hypervisor, so the request should be handled immediately.
     */
-    while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+    while (!virtqueue_get_buf(vi->cvq, &tmp))
         cpu_relax();
 
     return status == VIRTIO_NET_OK;
@@ -1006,13 +1006,13 @@ static void free_unused_bufs(struct virtnet_info *vi)
 {
     void *buf;
     while (1) {
-        buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+        buf = virtqueue_detach_unused_buf(vi->svq);
         if (!buf)
             break;
         dev_kfree_skb(buf);
     }
     while (1) {
-        buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+        buf = virtqueue_detach_unused_buf(vi->rvq);
         if (!buf)
             break;
         if (vi->mergeable_rx_bufs || vi->big_packets)
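
The control-queue hunk above is a compact illustration of the whole wrapper API in use. Condensed from virtnet_send_command() as it reads after this patch (sg, out, in, status and tmp are set up earlier in the real function):

    /* Post the command, kick the host, then busy-wait for completion:
     * the kick traps synchronously into the hypervisor via an ioport
     * write, so the response is expected almost immediately. */
    BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
    virtqueue_kick(vi->cvq);
    while (!virtqueue_get_buf(vi->cvq, &tmp))
        cpu_relax();
    return status == VIRTIO_NET_OK;

Every conversion in this patch follows the same mechanical rule: vq->vq_ops->op(vq, args...) becomes virtqueue_op(vq, args...), with no behavioral change intended.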