author     Michael S. Tsirkin <mst@redhat.com>    2014-10-23 17:12:10 -0400
committer  Michael S. Tsirkin <mst@redhat.com>    2014-12-09 05:05:27 -0500
commit     946fa5647b529402161814ca8ed1302254b6affb
tree       d5cb48d9a5b0fbf9cf313745ff5c222ff304000f
parent     38f37b578f7d62a827d6f42e21d55bf428f9af8e
virtio_net: pass vi around
Too many places poke at [rs]q->vq->vdev->priv just to get
the vi structure. Let's just pass the pointer around: seems
cleaner, and might even be faster.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Cornelia Huck <cornelia.huck@de.ibm.com>
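
For context, a minimal before/after sketch of the pattern this patch removes, using try_fill_recv() as the example; signatures are taken from the diff below, function bodies are elided:

/* Before: each helper re-derived vi from its queue through three
 * pointer dereferences on every call.
 */
static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
{
	struct virtnet_info *vi = rq->vq->vdev->priv;
	/* ... */
}

/* After: callers that already hold vi pass it down explicitly. */
static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
			  gfp_t gfp)
{
	/* ... vi is used directly, no pointer chasing ... */
}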
 drivers/net/virtio_net.c | 38 ++++++++++++++++++++------------------
 1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index c07e0302438e..1630c217d9f7 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -241,11 +241,11 @@ static unsigned long mergeable_buf_to_ctx(void *buf, unsigned int truesize)
 }
 
 /* Called from bottom half context */
-static struct sk_buff *page_to_skb(struct receive_queue *rq,
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+				   struct receive_queue *rq,
 				   struct page *page, unsigned int offset,
 				   unsigned int len, unsigned int truesize)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
 	unsigned int copy, hdr_len, hdr_padded_len;
@@ -328,12 +328,13 @@ static struct sk_buff *receive_small(void *buf, unsigned int len)
 }
 
 static struct sk_buff *receive_big(struct net_device *dev,
+				   struct virtnet_info *vi,
 				   struct receive_queue *rq,
 				   void *buf,
 				   unsigned int len)
 {
 	struct page *page = buf;
-	struct sk_buff *skb = page_to_skb(rq, page, 0, len, PAGE_SIZE);
+	struct sk_buff *skb = page_to_skb(vi, rq, page, 0, len, PAGE_SIZE);
 
 	if (unlikely(!skb))
 		goto err;
@@ -359,7 +360,8 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 	int offset = buf - page_address(page);
 	unsigned int truesize = max(len, mergeable_ctx_to_buf_truesize(ctx));
 
-	struct sk_buff *head_skb = page_to_skb(rq, page, offset, len, truesize);
+	struct sk_buff *head_skb = page_to_skb(vi, rq, page, offset, len,
+					       truesize);
 	struct sk_buff *curr_skb = head_skb;
 
 	if (unlikely(!curr_skb))
@@ -433,9 +435,9 @@ err_buf:
 	return NULL;
 }
 
-static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
+static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
+			void *buf, unsigned int len)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct net_device *dev = vi->dev;
 	struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 	struct sk_buff *skb;
@@ -459,7 +461,7 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
 	if (vi->mergeable_rx_bufs)
 		skb = receive_mergeable(dev, vi, rq, (unsigned long)buf, len);
 	else if (vi->big_packets)
-		skb = receive_big(dev, rq, buf, len);
+		skb = receive_big(dev, vi, rq, buf, len);
 	else
 		skb = receive_small(buf, len);
 
@@ -539,9 +541,9 @@ frame_err:
 	dev_kfree_skb(skb);
 }
 
-static int add_recvbuf_small(struct receive_queue *rq, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
+			     gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	struct sk_buff *skb;
 	struct skb_vnet_hdr *hdr;
 	int err;
@@ -664,9 +666,9 @@ static int add_recvbuf_mergeable(struct receive_queue *rq, gfp_t gfp)
  * before we're receiving packets, or from refill_work which is
  * careful to disable receiving (using napi_disable).
  */
-static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
+static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
+			  gfp_t gfp)
 {
-	struct virtnet_info *vi = rq->vq->vdev->priv;
 	int err;
 	bool oom;
 
@@ -677,7 +679,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
 	else if (vi->big_packets)
 		err = add_recvbuf_big(rq, gfp);
 	else
-		err = add_recvbuf_small(rq, gfp);
+		err = add_recvbuf_small(vi, rq, gfp);
 
 	oom = err == -ENOMEM;
 	if (err)
@@ -726,7 +728,7 @@ static void refill_work(struct work_struct *work)
 		struct receive_queue *rq = &vi->rq[i];
 
 		napi_disable(&rq->napi);
-		still_empty = !try_fill_recv(rq, GFP_KERNEL);
+		still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
 		virtnet_napi_enable(rq);
 
 		/* In theory, this can happen: if we don't get any buffers in
@@ -745,12 +747,12 @@ static int virtnet_receive(struct receive_queue *rq, int budget)
 
 	while (received < budget &&
 	       (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
-		receive_buf(rq, buf, len);
+		receive_buf(vi, rq, buf, len);
 		received++;
 	}
 
 	if (rq->vq->num_free > virtqueue_get_vring_size(rq->vq) / 2) {
-		if (!try_fill_recv(rq, GFP_ATOMIC))
+		if (!try_fill_recv(vi, rq, GFP_ATOMIC))
 			schedule_delayed_work(&vi->refill, 0);
 	}
 
@@ -826,7 +828,7 @@ static int virtnet_open(struct net_device *dev)
 	for (i = 0; i < vi->max_queue_pairs; i++) {
 		if (i < vi->curr_queue_pairs)
 			/* Make sure we have some buffers: if oom use wq. */
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 		virtnet_napi_enable(&vi->rq[i]);
 	}
@@ -1851,7 +1853,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 	/* Last of all, set up some receive buffers. */
 	for (i = 0; i < vi->curr_queue_pairs; i++) {
-		try_fill_recv(&vi->rq[i], GFP_KERNEL);
+		try_fill_recv(vi, &vi->rq[i], GFP_KERNEL);
 
 		/* If we didn't even get one input buffer, we're useless. */
 		if (vi->rq[i].vq->num_free ==
@@ -1971,7 +1973,7 @@ static int virtnet_restore(struct virtio_device *vdev)
 
 	if (netif_running(vi->dev)) {
 		for (i = 0; i < vi->curr_queue_pairs; i++)
-			if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+			if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
 				schedule_delayed_work(&vi->refill, 0);
 
 	for (i = 0; i < vi->max_queue_pairs; i++)