author		Eric Dumazet <eric.dumazet@gmail.com>	2010-11-10 18:42:00 -0500
committer	David S. Miller <davem@davemloft.net>	2010-11-16 14:15:08 -0500
commit		4af429d29b341bb1735f04c2fb960178ed5d52e7 (patch)
tree		b5179224883dc56cde57058014480e4bcf22b75b /net/8021q
parent		8ffab51b3dfc54876f145f15b351c41f3f703195 (diff)
vlan: lockless transmit path
vlan is a stacked device, like tunnels. We should use the lockless
mechanism we are using in tunnels and loopback.

This patch completely removes locking in TX path.

tx stat counters are added into existing percpu stat structure, renamed
from vlan_rx_stats to vlan_pcpu_stats.

Note : this partially reverts commit 2e59af3dcbdf (vlan: multiqueue vlan device)

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
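For context, the sketch below is not part of the patch; it is a minimal, hypothetical illustration (made-up names: my_pcpu_stats, my_priv, my_init, my_xmit) of the per-CPU accounting idiom the commit message describes and the vlan_dev.c hunks below apply: each CPU owns its own counter block, 64-bit counters are bracketed by u64_stats_update_begin()/u64_stats_update_end(), and no TX spinlock is taken.

```c
/* Hedged sketch of the lockless per-cpu TX accounting idiom this patch
 * adopts.  All names here (my_pcpu_stats, my_priv, my_init, my_xmit)
 * are illustrative, not taken from the patch.
 */
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;		/* guards the 64bit counters on 32bit hosts */
	u32			tx_dropped;	/* u32: updated without syncp */
};

struct my_priv {
	struct my_pcpu_stats __percpu *stats;	/* one counter block per possible CPU */
};

static int my_init(struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	priv->stats = alloc_percpu(struct my_pcpu_stats);
	return priv->stats ? 0 : -ENOMEM;
}

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);
	unsigned int len = skb->len;
	int ret;

	/* the real vlan xmit first re-targets skb->dev to the underlying
	 * real device before handing the skb down
	 */
	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		/* this CPU's private block: no lock, no cache-line bouncing */
		struct my_pcpu_stats *stats = this_cpu_ptr(priv->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(priv->stats->tx_dropped);
	}
	return ret;
}
```

The same idea, with the vlan-specific details (re-targeting the skb to the real device, setting NETIF_F_LLTX), is what the patch itself implements in vlan_dev_hard_start_xmit() and vlan_dev_init() below.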
Diffstat (limited to 'net/8021q')
-rw-r--r--	net/8021q/vlan.c	4
-rw-r--r--	net/8021q/vlan.h	18
-rw-r--r--	net/8021q/vlan_core.c	4
-rw-r--r--	net/8021q/vlan_dev.c	61
-rw-r--r--	net/8021q/vlan_netlink.c	20
5 files changed, 50 insertions(+), 57 deletions(-)
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 55d2135889fc..dc1071327d87 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -272,13 +272,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
 	}
 
-	new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
-				  vlan_setup, real_dev->num_tx_queues);
+	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
 
 	if (new_dev == NULL)
 		return -ENOBUFS;
 
-	netif_copy_real_num_queues(new_dev, real_dev);
 	dev_net_set(new_dev, net);
 	/* need 4 bytes for extra VLAN header info,
 	 * hope the underlying device can handle it.
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 4625ba64dfdc..5687c9b95f33 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -19,19 +19,25 @@ struct vlan_priority_tci_mapping {
 
 
 /**
- * struct vlan_rx_stats - VLAN percpu rx stats
+ * struct vlan_pcpu_stats - VLAN percpu rx/tx stats
  * @rx_packets: number of received packets
  * @rx_bytes: number of received bytes
  * @rx_multicast: number of received multicast packets
+ * @tx_packets: number of transmitted packets
+ * @tx_bytes: number of transmitted bytes
  * @syncp: synchronization point for 64bit counters
- * @rx_errors: number of errors
+ * @rx_errors: number of rx errors
+ * @tx_dropped: number of tx drops
  */
-struct vlan_rx_stats {
+struct vlan_pcpu_stats {
 	u64			rx_packets;
 	u64			rx_bytes;
 	u64			rx_multicast;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct u64_stats_sync	syncp;
-	unsigned long		rx_errors;
+	u32			rx_errors;
+	u32			tx_dropped;
 };
 
 /**
@@ -45,7 +51,7 @@ struct vlan_rx_stats {
  * @real_dev: underlying netdevice
  * @real_dev_addr: address of underlying netdevice
  * @dent: proc dir entry
- * @vlan_rx_stats: ptr to percpu rx stats
+ * @vlan_pcpu_stats: ptr to percpu rx stats
  */
 struct vlan_dev_info {
 	unsigned int				nr_ingress_mappings;
@@ -60,7 +66,7 @@ struct vlan_dev_info {
 	unsigned char				real_dev_addr[ETH_ALEN];
 
 	struct proc_dir_entry			*dent;
-	struct vlan_rx_stats __percpu		*vlan_rx_stats;
+	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 69b2f79800a5..ce8e3ab3e7a5 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
-	struct vlan_rx_stats *rx_stats;
+	struct vlan_pcpu_stats *rx_stats;
 
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
 	if (!vlan_dev) {
@@ -26,7 +26,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
+	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
 
 	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f3c9552f6ba8..2fa3f4a3f60f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct vlan_hdr *vhdr;
-	struct vlan_rx_stats *rx_stats;
+	struct vlan_pcpu_stats *rx_stats;
 	struct net_device *vlan_dev;
 	u16 vlan_id;
 	u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		skb->dev = vlan_dev;
 
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
 
 		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
@@ -310,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	int i = skb_get_queue_mapping(skb);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 	unsigned int len;
 	int ret;
@@ -334,10 +332,16 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	ret = dev_queue_xmit(skb);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		txq->tx_packets++;
-		txq->tx_bytes += len;
-	} else
-		txq->tx_dropped++;
+		struct vlan_pcpu_stats *stats;
+
+		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+	}
 
 	return ret;
 }
@@ -696,6 +700,7 @@ static int vlan_dev_init(struct net_device *dev)
 		      (1<<__LINK_STATE_PRESENT);
 
 	dev->features |= real_dev->features & real_dev->vlan_features;
+	dev->features |= NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
@@ -728,8 +733,8 @@ static int vlan_dev_init(struct net_device *dev)
 
 	vlan_dev_set_lockdep_class(dev, subclass);
 
-	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
-	if (!vlan_dev_info(dev)->vlan_rx_stats)
+	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+	if (!vlan_dev_info(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
 	return 0;
@@ -741,8 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev)
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	int i;
 
-	free_percpu(vlan->vlan_rx_stats);
-	vlan->vlan_rx_stats = NULL;
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -780,33 +785,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-	dev_txq_stats_fold(dev, stats);
 
-	if (vlan_dev_info(dev)->vlan_rx_stats) {
-		struct vlan_rx_stats *p, accum = {0};
+	if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+		struct vlan_pcpu_stats *p;
+		u32 rx_errors = 0, tx_dropped = 0;
 		int i;
 
 		for_each_possible_cpu(i) {
-			u64 rxpackets, rxbytes, rxmulticast;
+			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 			unsigned int start;
 
-			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
 			do {
 				start = u64_stats_fetch_begin_bh(&p->syncp);
 				rxpackets	= p->rx_packets;
 				rxbytes		= p->rx_bytes;
 				rxmulticast	= p->rx_multicast;
+				txpackets	= p->tx_packets;
+				txbytes		= p->tx_bytes;
 			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
-			accum.rx_packets += rxpackets;
-			accum.rx_bytes   += rxbytes;
-			accum.rx_multicast += rxmulticast;
-			/* rx_errors is ulong, not protected by syncp */
-			accum.rx_errors  += p->rx_errors;
+
+			stats->rx_packets	+= rxpackets;
+			stats->rx_bytes		+= rxbytes;
+			stats->multicast	+= rxmulticast;
+			stats->tx_packets	+= txpackets;
+			stats->tx_bytes		+= txbytes;
+			/* rx_errors & tx_dropped are u32 */
+			rx_errors	+= p->rx_errors;
+			tx_dropped	+= p->tx_dropped;
 		}
-		stats->rx_packets = accum.rx_packets;
-		stats->rx_bytes   = accum.rx_bytes;
-		stats->rx_errors  = accum.rx_errors;
-		stats->multicast  = accum.rx_multicast;
+		stats->rx_errors  = rx_errors;
+		stats->tx_dropped = tx_dropped;
 	}
 	return stats;
 }
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index ddc105734af7..be9a5c19a775 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -101,25 +101,6 @@ static int vlan_changelink(struct net_device *dev,
 	return 0;
 }
 
-static int vlan_get_tx_queues(struct net *net,
-			      struct nlattr *tb[],
-			      unsigned int *num_tx_queues,
-			      unsigned int *real_num_tx_queues)
-{
-	struct net_device *real_dev;
-
-	if (!tb[IFLA_LINK])
-		return -EINVAL;
-
-	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-	if (!real_dev)
-		return -ENODEV;
-
-	*num_tx_queues      = real_dev->num_tx_queues;
-	*real_num_tx_queues = real_dev->real_num_tx_queues;
-	return 0;
-}
-
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
@@ -237,7 +218,6 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
 	.maxtype	= IFLA_VLAN_MAX,
 	.policy		= vlan_policy,
 	.priv_size	= sizeof(struct vlan_dev_info),
-	.get_tx_queues  = vlan_get_tx_queues,
 	.setup		= vlan_setup,
 	.validate	= vlan_validate,
 	.newlink	= vlan_newlink,
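As a companion to the vlan_dev_get_stats64() hunk above, here is a hedged reader-side sketch (same hypothetical my_priv/my_pcpu_stats names as in the earlier sketch, not from the patch) of folding the per-CPU blocks into one rtnl_link_stats64 with the u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() retry loop used in the diff.

```c
/* Reader side of the same idiom: fold every CPU's block into the
 * aggregate, retrying a block if a writer was mid-update.  Hypothetical
 * names; mirrors the vlan_dev_get_stats64() change shown above.
 */
static struct rtnl_link_stats64 *my_get_stats64(struct net_device *dev,
						struct rtnl_link_stats64 *stats)
{
	struct my_priv *priv = netdev_priv(dev);
	int i;

	for_each_possible_cpu(i) {
		const struct my_pcpu_stats *p = per_cpu_ptr(priv->stats, i);
		u64 txpackets, txbytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			txpackets = p->tx_packets;
			txbytes   = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->tx_packets += txpackets;
		stats->tx_bytes   += txbytes;
		/* tx_dropped is a plain u32, read outside the syncp section */
		stats->tx_dropped += p->tx_dropped;
	}
	return stats;
}
```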