 drivers/net/virtio_net.c | 85
 1 file changed, 79 insertions(+), 6 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index be3686a298da..fbea637eb742 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,6 +40,15 @@ module_param(gso, bool, 0444);
 
 #define VIRTNET_SEND_COMMAND_SG_MAX    2
 
+struct virtnet_stats {
+        struct u64_stats_sync syncp;
+        u64 tx_bytes;
+        u64 tx_packets;
+
+        u64 rx_bytes;
+        u64 rx_packets;
+};
+
 struct virtnet_info {
         struct virtio_device *vdev;
         struct virtqueue *rvq, *svq, *cvq;
@@ -56,6 +65,9 @@ struct virtnet_info {
         /* Host will merge rx buffers for big packets (shake it! shake it!) */
         bool mergeable_rx_bufs;
 
+        /* Active statistics */
+        struct virtnet_stats __percpu *stats;
+
         /* Work struct for refilling if we run low on memory. */
         struct delayed_work refill;
 
@@ -209,7 +221,6 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
                         skb->dev->stats.rx_length_errors++;
                         return -EINVAL;
                 }
-
                 page = virtqueue_get_buf(vi->rvq, &len);
                 if (!page) {
                         pr_debug("%s: rx error: %d buffers missing\n",
@@ -217,6 +228,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
                         skb->dev->stats.rx_length_errors++;
                         return -EINVAL;
                 }
+
                 if (len > PAGE_SIZE)
                         len = PAGE_SIZE;
 
@@ -230,6 +242,7 @@ static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
 static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 {
         struct virtnet_info *vi = netdev_priv(dev);
+        struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
         struct sk_buff *skb;
         struct page *page;
         struct skb_vnet_hdr *hdr;
@@ -265,8 +278,11 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
 
         hdr = skb_vnet_hdr(skb);
         skb->truesize += skb->data_len;
-        dev->stats.rx_bytes += skb->len;
-        dev->stats.rx_packets++;
+
+        u64_stats_update_begin(&stats->syncp);
+        stats->rx_bytes += skb->len;
+        stats->rx_packets++;
+        u64_stats_update_end(&stats->syncp);
 
         if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                 pr_debug("Needs csum!\n");
@@ -515,11 +531,16 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
         struct sk_buff *skb;
         unsigned int len, tot_sgs = 0;
+        struct virtnet_stats __percpu *stats = this_cpu_ptr(vi->stats);
 
         while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                 pr_debug("Sent skb %p\n", skb);
-                vi->dev->stats.tx_bytes += skb->len;
-                vi->dev->stats.tx_packets++;
+
+                u64_stats_update_begin(&stats->syncp);
+                stats->tx_bytes += skb->len;
+                stats->tx_packets++;
+                u64_stats_update_end(&stats->syncp);
+
                 tot_sgs += skb_vnet_hdr(skb)->num_sg;
                 dev_kfree_skb_any(skb);
         }
@@ -641,6 +662,40 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
         return 0;
 }
 
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+                                               struct rtnl_link_stats64 *tot)
+{
+        struct virtnet_info *vi = netdev_priv(dev);
+        int cpu;
+        unsigned int start;
+
+        for_each_possible_cpu(cpu) {
+                struct virtnet_stats __percpu *stats
+                        = per_cpu_ptr(vi->stats, cpu);
+                u64 tpackets, tbytes, rpackets, rbytes;
+
+                do {
+                        start = u64_stats_fetch_begin(&stats->syncp);
+                        tpackets = stats->tx_packets;
+                        tbytes = stats->tx_bytes;
+                        rpackets = stats->rx_packets;
+                        rbytes = stats->rx_bytes;
+                } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+                tot->rx_packets += rpackets;
+                tot->tx_packets += tpackets;
+                tot->rx_bytes += rbytes;
+                tot->tx_bytes += tbytes;
+        }
+
+        tot->tx_dropped = dev->stats.tx_dropped;
+        tot->rx_dropped = dev->stats.rx_dropped;
+        tot->rx_length_errors = dev->stats.rx_length_errors;
+        tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+        return tot;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void virtnet_netpoll(struct net_device *dev)
 {
@@ -650,6 +705,14 @@ static void virtnet_netpoll(struct net_device *dev)
 }
 #endif
 
+static void virtnet_free(struct net_device *dev)
+{
+        struct virtnet_info *vi = netdev_priv(dev);
+
+        free_percpu(vi->stats);
+        free_netdev(dev);
+}
+
 static int virtnet_open(struct net_device *dev)
 {
         struct virtnet_info *vi = netdev_priv(dev);
@@ -835,6 +898,7 @@ static const struct net_device_ops virtnet_netdev = {
         .ndo_set_mac_address = virtnet_set_mac_address,
         .ndo_set_rx_mode = virtnet_set_rx_mode,
         .ndo_change_mtu = virtnet_change_mtu,
+        .ndo_get_stats64 = virtnet_stats,
         .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
         .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -895,6 +959,8 @@ static int virtnet_probe(struct virtio_device *vdev)
         /* Set up network device as normal. */
         dev->netdev_ops = &virtnet_netdev;
         dev->features = NETIF_F_HIGHDMA;
+        dev->destructor = virtnet_free;
+
         SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
         SET_NETDEV_DEV(dev, &vdev->dev);
 
@@ -939,6 +1005,11 @@ static int virtnet_probe(struct virtio_device *vdev)
         vi->vdev = vdev;
         vdev->priv = vi;
         vi->pages = NULL;
+        vi->stats = alloc_percpu(struct virtnet_stats);
+        err = -ENOMEM;
+        if (vi->stats == NULL)
+                goto free;
+
         INIT_DELAYED_WORK(&vi->refill, refill_work);
         sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
         sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
@@ -958,7 +1029,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
         err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
         if (err)
-                goto free;
+                goto free_stats;
 
         vi->rvq = vqs[0];
         vi->svq = vqs[1];
@@ -1003,6 +1074,8 @@ unregister:
         cancel_delayed_work_sync(&vi->refill);
 free_vqs:
         vdev->config->del_vqs(vdev);
+free_stats:
+        free_percpu(vi->stats);
 free:
         free_netdev(dev);
         return err;