author	Eric Dumazet <dada1@cosmosbay.com>	2009-05-17 20:35:38 -0400
committer	David S. Miller <davem@davemloft.net>	2009-05-18 18:15:06 -0400
commit	450c4ea15ecb89567e6a75b89cbb8a598a7efb75 (patch)
tree	5e99e5e58ca2e5e046d47963773ef66857bec46a /net/8021q/vlan_dev.c
parent	7004bf252c53da18f6b55103e0c92f777f846806 (diff)
vlan: use struct netdev_queue counters instead of dev->stats
We can update the netdev_queue tx_bytes/tx_packets/tx_dropped counters instead of the dev->stats ones, to reduce the number of cache lines dirtied in the xmit path. This fixes a performance problem on SMP when many different CPUs take the vlan tx path.

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
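Why this is safe deserves a sentence: the per-queue counters are not lost, they are summed back into one struct net_device_stats whenever the statistics are read. The sketch below illustrates that fold-up step; it is written for this page, not taken from the patch. The helper name example_fold_tx_stats is hypothetical, and it assumes that in this kernel generation dev_get_stats() performs the equivalent summation for devices that do not supply their own ndo_get_stats.

	/*
	 * Hedged sketch (not part of this patch): fold the per-queue tx
	 * counters back into the device totals on the comparatively rare
	 * read side. Hot xmit paths write only their own queue's counters;
	 * readers pay for the loop over all tx queues.
	 */
	#include <linux/netdevice.h>

	static void example_fold_tx_stats(struct net_device *dev)
	{
		struct net_device_stats *stats = &dev->stats;
		unsigned long tx_bytes = 0, tx_packets = 0, tx_dropped = 0;
		unsigned int i;

		for (i = 0; i < dev->num_tx_queues; i++) {
			const struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

			tx_bytes   += txq->tx_bytes;
			tx_packets += txq->tx_packets;
			tx_dropped += txq->tx_dropped;
		}
		stats->tx_bytes   = tx_bytes;
		stats->tx_packets = tx_packets;
		stats->tx_dropped = tx_dropped;
	}

The design point: the vlan xmit path already dirties its struct netdev_queue (the queue's tx lock lives there), so bumping counters in the same structure adds no newly dirtied cache line, whereas dev->stats sits on a separate line that every transmitting CPU would otherwise write as well.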
Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--	net/8021q/vlan_dev.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 25ba41e35338..8faacee68633 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -290,7 +290,7 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 
 static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-	struct net_device_stats *stats = &dev->stats;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 
 	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
@@ -309,7 +309,7 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 		skb = __vlan_put_tag(skb, vlan_tci);
 		if (!skb) {
-			stats->tx_dropped++;
+			txq->tx_dropped++;
 			return NETDEV_TX_OK;
 		}
 
@@ -317,8 +317,8 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
 	}
 
-	stats->tx_packets++;
-	stats->tx_bytes += skb->len;
+	txq->tx_packets++;
+	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	dev_queue_xmit(skb);
@@ -328,15 +328,15 @@ static int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static int vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	struct net_device_stats *stats = &dev->stats;
+	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	u16 vlan_tci;
 
 	vlan_tci = vlan_dev_info(dev)->vlan_id;
 	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 
-	stats->tx_packets++;
-	stats->tx_bytes += skb->len;
+	txq->tx_packets++;
+	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
 	dev_queue_xmit(skb);
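One detail worth noting: both handlers fetch queue 0 unconditionally. That is fine here because, to my understanding, vlan devices in this era are created with plain alloc_netdev(), which registers a single tx queue, so there is exactly one struct netdev_queue to account against.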