about summary refs log tree commit diff stats
path: root/drivers/net
diff options
context:
space:
mode:
author	stephen hemminger <shemminger@vyatta.com>	2011-06-21 01:35:31 -0400
committer	David S. Miller <davem@davemloft.net>	2011-06-21 18:57:05 -0400
commit	e00f85bec0a9924eb517ccd126ddbb9787068f53 (patch)
tree	35b7ea541947b92a4a06c43cd58599ec639961c4 /drivers/net
parent	62ea05577ed3ea4f542f9bc17cb716787316e2ea (diff)
xen: convert to 64 bit stats interface
Convert xen driver to 64 bit statistics interface. Use stats_sync to ensure
that 64 bit update is read atomically on 32 bit platform. Put hot statistics
into per-cpu table.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Ian Campbell <ian.campbell@citrix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/xen-netfront.c69
1 file changed, 64 insertions, 5 deletions
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index d29365a232a1..d7c8a98daff6 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -70,6 +70,14 @@ struct netfront_cb {
70#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE) 70#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
71#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256) 71#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
72 72
73struct netfront_stats {
74 u64 rx_packets;
75 u64 tx_packets;
76 u64 rx_bytes;
77 u64 tx_bytes;
78 struct u64_stats_sync syncp;
79};
80
73struct netfront_info { 81struct netfront_info {
74 struct list_head list; 82 struct list_head list;
75 struct net_device *netdev; 83 struct net_device *netdev;
@@ -122,6 +130,8 @@ struct netfront_info {
122 struct mmu_update rx_mmu[NET_RX_RING_SIZE]; 130 struct mmu_update rx_mmu[NET_RX_RING_SIZE];
123 131
124 /* Statistics */ 132 /* Statistics */
133 struct netfront_stats __percpu *stats;
134
125 unsigned long rx_gso_checksum_fixup; 135 unsigned long rx_gso_checksum_fixup;
126}; 136};
127 137
@@ -468,6 +478,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
468{ 478{
469 unsigned short id; 479 unsigned short id;
470 struct netfront_info *np = netdev_priv(dev); 480 struct netfront_info *np = netdev_priv(dev);
481 struct netfront_stats *stats = this_cpu_ptr(np->stats);
471 struct xen_netif_tx_request *tx; 482 struct xen_netif_tx_request *tx;
472 struct xen_netif_extra_info *extra; 483 struct xen_netif_extra_info *extra;
473 char *data = skb->data; 484 char *data = skb->data;
@@ -552,8 +563,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
552 if (notify) 563 if (notify)
553 notify_remote_via_irq(np->netdev->irq); 564 notify_remote_via_irq(np->netdev->irq);
554 565
555 dev->stats.tx_bytes += skb->len; 566 u64_stats_update_begin(&stats->syncp);
556 dev->stats.tx_packets++; 567 stats->tx_bytes += skb->len;
568 stats->tx_packets++;
569 u64_stats_update_end(&stats->syncp);
557 570
558 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ 571 /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
559 xennet_tx_buf_gc(dev); 572 xennet_tx_buf_gc(dev);
@@ -847,6 +860,8 @@ out:
847static int handle_incoming_queue(struct net_device *dev, 860static int handle_incoming_queue(struct net_device *dev,
848 struct sk_buff_head *rxq) 861 struct sk_buff_head *rxq)
849{ 862{
863 struct netfront_info *np = netdev_priv(dev);
864 struct netfront_stats *stats = this_cpu_ptr(np->stats);
850 int packets_dropped = 0; 865 int packets_dropped = 0;
851 struct sk_buff *skb; 866 struct sk_buff *skb;
852 867
@@ -871,8 +886,10 @@ static int handle_incoming_queue(struct net_device *dev,
871 continue; 886 continue;
872 } 887 }
873 888
874 dev->stats.rx_packets++; 889 u64_stats_update_begin(&stats->syncp);
875 dev->stats.rx_bytes += skb->len; 890 stats->rx_packets++;
891 stats->rx_bytes += skb->len;
892 u64_stats_update_end(&stats->syncp);
876 893
877 /* Pass it up. */ 894 /* Pass it up. */
878 netif_receive_skb(skb); 895 netif_receive_skb(skb);
@@ -1034,6 +1051,38 @@ static int xennet_change_mtu(struct net_device *dev, int mtu)
1034 return 0; 1051 return 0;
1035} 1052}
1036 1053
1054static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1055 struct rtnl_link_stats64 *tot)
1056{
1057 struct netfront_info *np = netdev_priv(dev);
1058 int cpu;
1059
1060 for_each_possible_cpu(cpu) {
1061 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1062 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1063 unsigned int start;
1064
1065 do {
1066 start = u64_stats_fetch_begin_bh(&stats->syncp);
1067
1068 rx_packets = stats->rx_packets;
1069 tx_packets = stats->tx_packets;
1070 rx_bytes = stats->rx_bytes;
1071 tx_bytes = stats->tx_bytes;
1072 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1073
1074 tot->rx_packets += rx_packets;
1075 tot->tx_packets += tx_packets;
1076 tot->rx_bytes += rx_bytes;
1077 tot->tx_bytes += tx_bytes;
1078 }
1079
1080 tot->rx_errors = dev->stats.rx_errors;
1081 tot->tx_dropped = dev->stats.tx_dropped;
1082
1083 return tot;
1084}
1085
1037static void xennet_release_tx_bufs(struct netfront_info *np) 1086static void xennet_release_tx_bufs(struct netfront_info *np)
1038{ 1087{
1039 struct sk_buff *skb; 1088 struct sk_buff *skb;
@@ -1182,6 +1231,7 @@ static const struct net_device_ops xennet_netdev_ops = {
1182 .ndo_stop = xennet_close, 1231 .ndo_stop = xennet_close,
1183 .ndo_start_xmit = xennet_start_xmit, 1232 .ndo_start_xmit = xennet_start_xmit,
1184 .ndo_change_mtu = xennet_change_mtu, 1233 .ndo_change_mtu = xennet_change_mtu,
1234 .ndo_get_stats64 = xennet_get_stats64,
1185 .ndo_set_mac_address = eth_mac_addr, 1235 .ndo_set_mac_address = eth_mac_addr,
1186 .ndo_validate_addr = eth_validate_addr, 1236 .ndo_validate_addr = eth_validate_addr,
1187 .ndo_fix_features = xennet_fix_features, 1237 .ndo_fix_features = xennet_fix_features,
@@ -1216,6 +1266,11 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1216 np->rx_refill_timer.data = (unsigned long)netdev; 1266 np->rx_refill_timer.data = (unsigned long)netdev;
1217 np->rx_refill_timer.function = rx_refill_timeout; 1267 np->rx_refill_timer.function = rx_refill_timeout;
1218 1268
1269 err = -ENOMEM;
1270 np->stats = alloc_percpu(struct netfront_stats);
1271 if (np->stats == NULL)
1272 goto exit;
1273
1219 /* Initialise tx_skbs as a free chain containing every entry. */ 1274 /* Initialise tx_skbs as a free chain containing every entry. */
1220 np->tx_skb_freelist = 0; 1275 np->tx_skb_freelist = 0;
1221 for (i = 0; i < NET_TX_RING_SIZE; i++) { 1276 for (i = 0; i < NET_TX_RING_SIZE; i++) {
@@ -1234,7 +1289,7 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1234 &np->gref_tx_head) < 0) { 1289 &np->gref_tx_head) < 0) {
1235 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n"); 1290 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
1236 err = -ENOMEM; 1291 err = -ENOMEM;
1237 goto exit; 1292 goto exit_free_stats;
1238 } 1293 }
1239 /* A grant for every rx ring slot */ 1294 /* A grant for every rx ring slot */
1240 if (gnttab_alloc_grant_references(RX_MAX_TARGET, 1295 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
@@ -1270,6 +1325,8 @@ static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev
1270 1325
1271 exit_free_tx: 1326 exit_free_tx:
1272 gnttab_free_grant_references(np->gref_tx_head); 1327 gnttab_free_grant_references(np->gref_tx_head);
1328 exit_free_stats:
1329 free_percpu(np->stats);
1273 exit: 1330 exit:
1274 free_netdev(netdev); 1331 free_netdev(netdev);
1275 return ERR_PTR(err); 1332 return ERR_PTR(err);
@@ -1869,6 +1926,8 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
1869 1926
1870 xennet_sysfs_delif(info->netdev); 1927 xennet_sysfs_delif(info->netdev);
1871 1928
1929 free_percpu(info->stats);
1930
1872 free_netdev(info->netdev); 1931 free_netdev(info->netdev);
1873 1932
1874 return 0; 1933 return 0;