author		Eric Dumazet <eric.dumazet@gmail.com>	2010-11-10 18:42:00 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-16 14:15:08 -0500
commit		4af429d29b341bb1735f04c2fb960178ed5d52e7 (patch)
tree		b5179224883dc56cde57058014480e4bcf22b75b /net/8021q/vlan_dev.c
parent		8ffab51b3dfc54876f145f15b351c41f3f703195 (diff)
vlan: lockless transmit path
vlan is a stacked device, like tunnels. We should use the lockless
mechanism we are using in tunnels and loopback.

This patch completely removes locking in the TX path. TX stat counters
are added into the existing percpu stat structure, renamed from
vlan_rx_stats to vlan_pcpu_stats.

Note: this partially reverts commit 2e59af3dcbdf (vlan: multiqueue vlan device).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
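The renamed per-cpu structure is declared in the 8021q headers, so it does
not appear in this file's diff. Inferred from the counters touched below,
its shape is roughly the following sketch (field order is an assumption):

	/* Sketch only, inferred from this diff; the real declaration
	 * lives in the 8021q headers, outside this file.
	 */
	#include <linux/u64_stats_sync.h>

	struct vlan_pcpu_stats {
		u64			rx_packets;	/* read under syncp */
		u64			rx_bytes;
		u64			rx_multicast;
		u64			tx_packets;
		u64			tx_bytes;
		struct u64_stats_sync	syncp;	/* protects the u64s above */
		u32			rx_errors;	/* plain u32, no syncp */
		u32			tx_dropped;
	};

The u64 counters are written under the syncp sequence counter so 32-bit
readers can detect torn reads and retry, while rx_errors and tx_dropped are
plain u32 values that readers simply sum.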
Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--	net/8021q/vlan_dev.c	61
1 file changed, 35 insertions(+), 26 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f3c9552f6ba8..2fa3f4a3f60f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct vlan_hdr *vhdr;
-	struct vlan_rx_stats *rx_stats;
+	struct vlan_pcpu_stats *rx_stats;
 	struct net_device *vlan_dev;
 	u16 vlan_id;
 	u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		skb->dev = vlan_dev;
 
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
 
 		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
@@ -310,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	int i = skb_get_queue_mapping(skb);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 	unsigned int len;
 	int ret;
@@ -334,10 +332,16 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	ret = dev_queue_xmit(skb);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		txq->tx_packets++;
-		txq->tx_bytes += len;
-	} else
-		txq->tx_dropped++;
+		struct vlan_pcpu_stats *stats;
+
+		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+	}
 
 	return ret;
 }
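The update_begin/update_end pair around the two TX counters is what lets
32-bit readers in vlan_dev_get_stats64() detect a torn read and retry; an
unbalanced begin would leave u64_stats_fetch_retry_bh() spinning forever.
Factored out as a hypothetical helper (vlan_tx_account is an illustrative
name, not part of the patch), the writer-side discipline looks like:

	/* Illustrative only: every u64_stats_update_begin() must be
	 * paired with u64_stats_update_end() so readers' seqcount
	 * checks terminate.
	 */
	static void vlan_tx_account(struct vlan_pcpu_stats *stats,
				    unsigned int len)
	{
		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}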
@@ -696,6 +700,7 @@ static int vlan_dev_init(struct net_device *dev)
 			  (1<<__LINK_STATE_PRESENT);
 
 	dev->features |= real_dev->features & real_dev->vlan_features;
+	dev->features |= NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
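Advertising NETIF_F_LLTX is what actually makes the path lockless: the core
only takes the per-queue _xmit_lock around ndo_start_xmit() for devices
that do not set it, roughly as in this simplified sketch (adapted from the
net/core/dev.c of this era; shown for context, not part of the patch):

	/* Simplified sketch of how the core honored NETIF_F_LLTX. */
	#define HARD_TX_LOCK(dev, txq, cpu) {			\
		if (((dev)->features & NETIF_F_LLTX) == 0) {	\
			__netif_tx_lock(txq, cpu);		\
		}						\
	}

Skipping the lock is safe here because the vlan xmit path touches only
per-cpu counters before handing the skb to dev_queue_xmit() on the real
device, which takes its own locks.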
@@ -728,8 +733,8 @@ static int vlan_dev_init(struct net_device *dev)
 
 	vlan_dev_set_lockdep_class(dev, subclass);
 
-	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
-	if (!vlan_dev_info(dev)->vlan_rx_stats)
+	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+	if (!vlan_dev_info(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
 	return 0;
@@ -741,8 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	int i;
 
-	free_percpu(vlan->vlan_rx_stats);
-	vlan->vlan_rx_stats = NULL;
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -780,33 +785,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-	dev_txq_stats_fold(dev, stats);
 
-	if (vlan_dev_info(dev)->vlan_rx_stats) {
-		struct vlan_rx_stats *p, accum = {0};
+	if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+		struct vlan_pcpu_stats *p;
+		u32 rx_errors = 0, tx_dropped = 0;
 		int i;
 
 		for_each_possible_cpu(i) {
-			u64 rxpackets, rxbytes, rxmulticast;
+			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 			unsigned int start;
 
-			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
 			do {
 				start = u64_stats_fetch_begin_bh(&p->syncp);
 				rxpackets = p->rx_packets;
 				rxbytes = p->rx_bytes;
 				rxmulticast = p->rx_multicast;
+				txpackets = p->tx_packets;
+				txbytes = p->tx_bytes;
 			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
-			accum.rx_packets += rxpackets;
-			accum.rx_bytes += rxbytes;
-			accum.rx_multicast += rxmulticast;
-			/* rx_errors is ulong, not protected by syncp */
-			accum.rx_errors += p->rx_errors;
+
+			stats->rx_packets += rxpackets;
+			stats->rx_bytes += rxbytes;
+			stats->multicast += rxmulticast;
+			stats->tx_packets += txpackets;
+			stats->tx_bytes += txbytes;
+			/* rx_errors & tx_dropped are u32 */
+			rx_errors += p->rx_errors;
+			tx_dropped += p->tx_dropped;
 		}
-		stats->rx_packets = accum.rx_packets;
-		stats->rx_bytes = accum.rx_bytes;
-		stats->rx_errors = accum.rx_errors;
-		stats->multicast = accum.rx_multicast;
+		stats->rx_errors = rx_errors;
+		stats->tx_dropped = tx_dropped;
 	}
 	return stats;
 }
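With TX counters now folded from the same per-cpu structure, the
dev_txq_stats_fold() call is gone: the netdev_queue tx_packets/tx_bytes/
tx_dropped counters it used to sum are no longer updated by this driver.
Userspace still sees both directions through the usual rtnetlink path,
e.g. `ip -s link show dev eth0.100` (interface name illustrative) ends up
reading these counters via vlan_dev_get_stats64().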