aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/ip_gre.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2010-09-26 23:57:11 -0400
committerDavid S. Miller <davem@davemloft.net>2010-09-28 00:30:43 -0400
commite985aad723d7709e6bee566bacb100d33d9b791b (patch)
tree9b90751911cd8923dd2469d4f52a5628c0f4cc0c /net/ipv4/ip_gre.c
parent290b895e0ba4552dfcfc4bd35759c192345b934a (diff)
ip_gre: percpu stats accounting
Le lundi 27 septembre 2010 à 14:29 +0100, Ben Hutchings a écrit :
> > diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
> > index 5d6ddcb..de39b22 100644
> > --- a/net/ipv4/ip_gre.c
> > +++ b/net/ipv4/ip_gre.c
> [...]
> > @@ -377,7 +405,7 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
> >  	if (parms->name[0])
> >  		strlcpy(name, parms->name, IFNAMSIZ);
> >  	else
> > -		sprintf(name, "gre%%d");
> > +		strcpy(name, "gre%d");
> >
> >  	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
> >  	if (!dev)
> [...]
>
> This is a valid fix, but doesn't belong in this patch!

Sorry ? It was not a fix, but at most a cleanup ;)

Anyway I forgot the gretap case...

[PATCH 2/4 v2] ip_gre: percpu stats accounting

Maintain per_cpu tx_bytes, tx_packets, rx_bytes, rx_packets.

Other seldom used fields are kept in netdev->stats structure, possibly unsafe.

This is a preliminary work to support lockless transmit path, and correct RX stats, that are already unsafe.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/ip_gre.c')
-rw-r--r--	net/ipv4/ip_gre.c	143
1 files changed, 104 insertions, 39 deletions
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 5d6ddcb7403b..a1b5d5e03064 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -165,6 +165,34 @@ struct ipgre_net {
165#define for_each_ip_tunnel_rcu(start) \ 165#define for_each_ip_tunnel_rcu(start) \
166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next)) 166 for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
167 167
168/* often modified stats are per cpu, other are shared (netdev->stats) */
169struct pcpu_tstats {
170 unsigned long rx_packets;
171 unsigned long rx_bytes;
172 unsigned long tx_packets;
173 unsigned long tx_bytes;
174};
175
176static struct net_device_stats *ipgre_get_stats(struct net_device *dev)
177{
178 struct pcpu_tstats sum = { 0 };
179 int i;
180
181 for_each_possible_cpu(i) {
182 const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
183
184 sum.rx_packets += tstats->rx_packets;
185 sum.rx_bytes += tstats->rx_bytes;
186 sum.tx_packets += tstats->tx_packets;
187 sum.tx_bytes += tstats->tx_bytes;
188 }
189 dev->stats.rx_packets = sum.rx_packets;
190 dev->stats.rx_bytes = sum.rx_bytes;
191 dev->stats.tx_packets = sum.tx_packets;
192 dev->stats.tx_bytes = sum.tx_bytes;
193 return &dev->stats;
194}
195
168/* Given src, dst and key, find appropriate for input tunnel. */ 196/* Given src, dst and key, find appropriate for input tunnel. */
169 197
170static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev, 198static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
@@ -584,7 +612,7 @@ static int ipgre_rcv(struct sk_buff *skb)
584 if ((tunnel = ipgre_tunnel_lookup(skb->dev, 612 if ((tunnel = ipgre_tunnel_lookup(skb->dev,
585 iph->saddr, iph->daddr, key, 613 iph->saddr, iph->daddr, key,
586 gre_proto))) { 614 gre_proto))) {
587 struct net_device_stats *stats = &tunnel->dev->stats; 615 struct pcpu_tstats *tstats;
588 616
589 secpath_reset(skb); 617 secpath_reset(skb);
590 618
@@ -608,22 +636,22 @@ static int ipgre_rcv(struct sk_buff *skb)
608 /* Looped back packet, drop it! */ 636 /* Looped back packet, drop it! */
609 if (skb_rtable(skb)->fl.iif == 0) 637 if (skb_rtable(skb)->fl.iif == 0)
610 goto drop; 638 goto drop;
611 stats->multicast++; 639 tunnel->dev->stats.multicast++;
612 skb->pkt_type = PACKET_BROADCAST; 640 skb->pkt_type = PACKET_BROADCAST;
613 } 641 }
614#endif 642#endif
615 643
616 if (((flags&GRE_CSUM) && csum) || 644 if (((flags&GRE_CSUM) && csum) ||
617 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) { 645 (!(flags&GRE_CSUM) && tunnel->parms.i_flags&GRE_CSUM)) {
618 stats->rx_crc_errors++; 646 tunnel->dev->stats.rx_crc_errors++;
619 stats->rx_errors++; 647 tunnel->dev->stats.rx_errors++;
620 goto drop; 648 goto drop;
621 } 649 }
622 if (tunnel->parms.i_flags&GRE_SEQ) { 650 if (tunnel->parms.i_flags&GRE_SEQ) {
623 if (!(flags&GRE_SEQ) || 651 if (!(flags&GRE_SEQ) ||
624 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) { 652 (tunnel->i_seqno && (s32)(seqno - tunnel->i_seqno) < 0)) {
625 stats->rx_fifo_errors++; 653 tunnel->dev->stats.rx_fifo_errors++;
626 stats->rx_errors++; 654 tunnel->dev->stats.rx_errors++;
627 goto drop; 655 goto drop;
628 } 656 }
629 tunnel->i_seqno = seqno + 1; 657 tunnel->i_seqno = seqno + 1;
@@ -632,8 +660,8 @@ static int ipgre_rcv(struct sk_buff *skb)
632 /* Warning: All skb pointers will be invalidated! */ 660 /* Warning: All skb pointers will be invalidated! */
633 if (tunnel->dev->type == ARPHRD_ETHER) { 661 if (tunnel->dev->type == ARPHRD_ETHER) {
634 if (!pskb_may_pull(skb, ETH_HLEN)) { 662 if (!pskb_may_pull(skb, ETH_HLEN)) {
635 stats->rx_length_errors++; 663 tunnel->dev->stats.rx_length_errors++;
636 stats->rx_errors++; 664 tunnel->dev->stats.rx_errors++;
637 goto drop; 665 goto drop;
638 } 666 }
639 667
@@ -642,13 +670,17 @@ static int ipgre_rcv(struct sk_buff *skb)
642 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); 670 skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
643 } 671 }
644 672
645 skb_tunnel_rx(skb, tunnel->dev); 673 tstats = this_cpu_ptr(tunnel->dev->tstats);
674 tstats->rx_packets++;
675 tstats->rx_bytes += skb->len;
676
677 __skb_tunnel_rx(skb, tunnel->dev);
646 678
647 skb_reset_network_header(skb); 679 skb_reset_network_header(skb);
648 ipgre_ecn_decapsulate(iph, skb); 680 ipgre_ecn_decapsulate(iph, skb);
649 681
650 if (netif_rx(skb) == NET_RX_DROP) 682 if (netif_rx(skb) == NET_RX_DROP)
651 stats->rx_dropped++; 683 tunnel->dev->stats.rx_dropped++;
652 684
653 rcu_read_unlock(); 685 rcu_read_unlock();
654 return 0; 686 return 0;
@@ -665,8 +697,7 @@ drop_nolock:
665static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) 697static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
666{ 698{
667 struct ip_tunnel *tunnel = netdev_priv(dev); 699 struct ip_tunnel *tunnel = netdev_priv(dev);
668 struct net_device_stats *stats = &dev->stats; 700 struct pcpu_tstats *tstats;
669 struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
670 struct iphdr *old_iph = ip_hdr(skb); 701 struct iphdr *old_iph = ip_hdr(skb);
671 struct iphdr *tiph; 702 struct iphdr *tiph;
672 u8 tos; 703 u8 tos;
@@ -694,7 +725,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
694 /* NBMA tunnel */ 725 /* NBMA tunnel */
695 726
696 if (skb_dst(skb) == NULL) { 727 if (skb_dst(skb) == NULL) {
697 stats->tx_fifo_errors++; 728 dev->stats.tx_fifo_errors++;
698 goto tx_error; 729 goto tx_error;
699 } 730 }
700 731
@@ -740,14 +771,20 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
740 } 771 }
741 772
742 { 773 {
743 struct flowi fl = { .oif = tunnel->parms.link, 774 struct flowi fl = {
744 .nl_u = { .ip4_u = 775 .oif = tunnel->parms.link,
745 { .daddr = dst, 776 .nl_u = {
746 .saddr = tiph->saddr, 777 .ip4_u = {
747 .tos = RT_TOS(tos) } }, 778 .daddr = dst,
748 .proto = IPPROTO_GRE }; 779 .saddr = tiph->saddr,
780 .tos = RT_TOS(tos)
781 }
782 },
783 .proto = IPPROTO_GRE
784 }
785;
749 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 786 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
750 stats->tx_carrier_errors++; 787 dev->stats.tx_carrier_errors++;
751 goto tx_error; 788 goto tx_error;
752 } 789 }
753 } 790 }
@@ -755,7 +792,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
755 792
756 if (tdev == dev) { 793 if (tdev == dev) {
757 ip_rt_put(rt); 794 ip_rt_put(rt);
758 stats->collisions++; 795 dev->stats.collisions++;
759 goto tx_error; 796 goto tx_error;
760 } 797 }
761 798
@@ -818,7 +855,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
818 dev->needed_headroom = max_headroom; 855 dev->needed_headroom = max_headroom;
819 if (!new_skb) { 856 if (!new_skb) {
820 ip_rt_put(rt); 857 ip_rt_put(rt);
821 txq->tx_dropped++; 858 dev->stats.tx_dropped++;
822 dev_kfree_skb(skb); 859 dev_kfree_skb(skb);
823 return NETDEV_TX_OK; 860 return NETDEV_TX_OK;
824 } 861 }
@@ -885,15 +922,15 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
885 } 922 }
886 923
887 nf_reset(skb); 924 nf_reset(skb);
888 925 tstats = this_cpu_ptr(dev->tstats);
889 IPTUNNEL_XMIT(); 926 __IPTUNNEL_XMIT(tstats, &dev->stats);
890 return NETDEV_TX_OK; 927 return NETDEV_TX_OK;
891 928
892tx_error_icmp: 929tx_error_icmp:
893 dst_link_failure(skb); 930 dst_link_failure(skb);
894 931
895tx_error: 932tx_error:
896 stats->tx_errors++; 933 dev->stats.tx_errors++;
897 dev_kfree_skb(skb); 934 dev_kfree_skb(skb);
898 return NETDEV_TX_OK; 935 return NETDEV_TX_OK;
899} 936}
@@ -913,13 +950,19 @@ static int ipgre_tunnel_bind_dev(struct net_device *dev)
913 /* Guess output device to choose reasonable mtu and needed_headroom */ 950 /* Guess output device to choose reasonable mtu and needed_headroom */
914 951
915 if (iph->daddr) { 952 if (iph->daddr) {
916 struct flowi fl = { .oif = tunnel->parms.link, 953 struct flowi fl = {
917 .nl_u = { .ip4_u = 954 .oif = tunnel->parms.link,
918 { .daddr = iph->daddr, 955 .nl_u = {
919 .saddr = iph->saddr, 956 .ip4_u = {
920 .tos = RT_TOS(iph->tos) } }, 957 .daddr = iph->daddr,
921 .proto = IPPROTO_GRE }; 958 .saddr = iph->saddr,
959 .tos = RT_TOS(iph->tos)
960 }
961 },
962 .proto = IPPROTO_GRE
963 };
922 struct rtable *rt; 964 struct rtable *rt;
965
923 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 966 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
924 tdev = rt->dst.dev; 967 tdev = rt->dst.dev;
925 ip_rt_put(rt); 968 ip_rt_put(rt);
@@ -1171,13 +1214,19 @@ static int ipgre_open(struct net_device *dev)
1171 struct ip_tunnel *t = netdev_priv(dev); 1214 struct ip_tunnel *t = netdev_priv(dev);
1172 1215
1173 if (ipv4_is_multicast(t->parms.iph.daddr)) { 1216 if (ipv4_is_multicast(t->parms.iph.daddr)) {
1174 struct flowi fl = { .oif = t->parms.link, 1217 struct flowi fl = {
1175 .nl_u = { .ip4_u = 1218 .oif = t->parms.link,
1176 { .daddr = t->parms.iph.daddr, 1219 .nl_u = {
1177 .saddr = t->parms.iph.saddr, 1220 .ip4_u = {
1178 .tos = RT_TOS(t->parms.iph.tos) } }, 1221 .daddr = t->parms.iph.daddr,
1179 .proto = IPPROTO_GRE }; 1222 .saddr = t->parms.iph.saddr,
1223 .tos = RT_TOS(t->parms.iph.tos)
1224 }
1225 },
1226 .proto = IPPROTO_GRE
1227 };
1180 struct rtable *rt; 1228 struct rtable *rt;
1229
1181 if (ip_route_output_key(dev_net(dev), &rt, &fl)) 1230 if (ip_route_output_key(dev_net(dev), &rt, &fl))
1182 return -EADDRNOTAVAIL; 1231 return -EADDRNOTAVAIL;
1183 dev = rt->dst.dev; 1232 dev = rt->dst.dev;
@@ -1217,12 +1266,19 @@ static const struct net_device_ops ipgre_netdev_ops = {
1217 .ndo_start_xmit = ipgre_tunnel_xmit, 1266 .ndo_start_xmit = ipgre_tunnel_xmit,
1218 .ndo_do_ioctl = ipgre_tunnel_ioctl, 1267 .ndo_do_ioctl = ipgre_tunnel_ioctl,
1219 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1268 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1269 .ndo_get_stats = ipgre_get_stats,
1220}; 1270};
1221 1271
1272static void ipgre_dev_free(struct net_device *dev)
1273{
1274 free_percpu(dev->tstats);
1275 free_netdev(dev);
1276}
1277
1222static void ipgre_tunnel_setup(struct net_device *dev) 1278static void ipgre_tunnel_setup(struct net_device *dev)
1223{ 1279{
1224 dev->netdev_ops = &ipgre_netdev_ops; 1280 dev->netdev_ops = &ipgre_netdev_ops;
1225 dev->destructor = free_netdev; 1281 dev->destructor = ipgre_dev_free;
1226 1282
1227 dev->type = ARPHRD_IPGRE; 1283 dev->type = ARPHRD_IPGRE;
1228 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4; 1284 dev->needed_headroom = LL_MAX_HEADER + sizeof(struct iphdr) + 4;
@@ -1260,6 +1316,10 @@ static int ipgre_tunnel_init(struct net_device *dev)
1260 } else 1316 } else
1261 dev->header_ops = &ipgre_header_ops; 1317 dev->header_ops = &ipgre_header_ops;
1262 1318
1319 dev->tstats = alloc_percpu(struct pcpu_tstats);
1320 if (!dev->tstats)
1321 return -ENOMEM;
1322
1263 return 0; 1323 return 0;
1264} 1324}
1265 1325
@@ -1446,6 +1506,10 @@ static int ipgre_tap_init(struct net_device *dev)
1446 1506
1447 ipgre_tunnel_bind_dev(dev); 1507 ipgre_tunnel_bind_dev(dev);
1448 1508
1509 dev->tstats = alloc_percpu(struct pcpu_tstats);
1510 if (!dev->tstats)
1511 return -ENOMEM;
1512
1449 return 0; 1513 return 0;
1450} 1514}
1451 1515
@@ -1456,6 +1520,7 @@ static const struct net_device_ops ipgre_tap_netdev_ops = {
1456 .ndo_set_mac_address = eth_mac_addr, 1520 .ndo_set_mac_address = eth_mac_addr,
1457 .ndo_validate_addr = eth_validate_addr, 1521 .ndo_validate_addr = eth_validate_addr,
1458 .ndo_change_mtu = ipgre_tunnel_change_mtu, 1522 .ndo_change_mtu = ipgre_tunnel_change_mtu,
1523 .ndo_get_stats = ipgre_get_stats,
1459}; 1524};
1460 1525
1461static void ipgre_tap_setup(struct net_device *dev) 1526static void ipgre_tap_setup(struct net_device *dev)
@@ -1464,7 +1529,7 @@ static void ipgre_tap_setup(struct net_device *dev)
1464 ether_setup(dev); 1529 ether_setup(dev);
1465 1530
1466 dev->netdev_ops = &ipgre_tap_netdev_ops; 1531 dev->netdev_ops = &ipgre_tap_netdev_ops;
1467 dev->destructor = free_netdev; 1532 dev->destructor = ipgre_dev_free;
1468 1533
1469 dev->iflink = 0; 1534 dev->iflink = 0;
1470 dev->features |= NETIF_F_NETNS_LOCAL; 1535 dev->features |= NETIF_F_NETNS_LOCAL;