path: root/net/8021q/vlan_dev.c
author    Eric Dumazet <eric.dumazet@gmail.com>    2009-09-02 20:39:16 -0400
committer David S. Miller <davem@davemloft.net>    2009-09-03 23:02:17 -0400
commit    1a123a3168566b10f87f228ae963770b26f27420
tree      cebf893e0ae4de5ee5077b69002f41d8f61d45f9  /net/8021q/vlan_dev.c
parent    2c11455321f37da6fe6cc36353149f9ac9183334
vlan: adds drops accounting
It's hard to tell if vlans are dropping frames, since every frame given to the vlan_???_start_xmit() functions is accounted as fully transmitted by the lower device. We can test the dev_queue_xmit() return value to properly account for dropped frames.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
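For illustration only, here is a minimal sketch of the accounting pattern the patch below applies (the standalone helper example_vlan_xmit() is hypothetical; the real changes live in vlan_dev_hard_start_xmit() and vlan_dev_hwaccel_hard_start_xmit()): the length is saved before the skb is handed to the lower device, because dev_queue_xmit() consumes the skb, and the tx counters are only credited when the return value is NET_XMIT_SUCCESS, otherwise tx_dropped is bumped.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch of the drops-accounting pattern; the function
 * name and standalone form are assumptions, not part of the patch. */
static netdev_tx_t example_vlan_xmit(struct sk_buff *skb,
				     struct netdev_queue *txq)
{
	/* Save the length first: dev_queue_xmit() consumes the skb,
	 * so skb->len must not be read after the call. */
	unsigned int len = skb->len;
	int ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS)) {
		txq->tx_packets++;	/* accepted by the lower device */
		txq->tx_bytes += len;
	} else {
		txq->tx_dropped++;	/* rejected: count it as a drop */
	}
	return NETDEV_TX_OK;
}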
Diffstat (limited to 'net/8021q/vlan_dev.c')
-rw-r--r--  net/8021q/vlan_dev.c  |  29
1 file changed, 22 insertions(+), 7 deletions(-)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 3938c3e50fb1..4198ec5c8abc 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -294,6 +294,8 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 	int i = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
+	unsigned int len;
+	int ret;
 
 	/* Handle non-VLAN frames if they are sent to us, for example by DHCP.
 	 *
@@ -319,11 +321,17 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 		vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
 	}
 
-	txq->tx_packets++;
-	txq->tx_bytes += skb->len;
 
 	skb->dev = vlan_dev_info(dev)->real_dev;
-	dev_queue_xmit(skb);
+	len = skb->len;
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		txq->tx_packets++;
+		txq->tx_bytes += len;
+	} else
+		txq->tx_dropped++;
+
 	return NETDEV_TX_OK;
 }
 
@@ -333,16 +341,23 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
 	int i = skb_get_queue_mapping(skb);
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	u16 vlan_tci;
+	unsigned int len;
+	int ret;
 
 	vlan_tci = vlan_dev_info(dev)->vlan_id;
 	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
 	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 
-	txq->tx_packets++;
-	txq->tx_bytes += skb->len;
-
 	skb->dev = vlan_dev_info(dev)->real_dev;
-	dev_queue_xmit(skb);
+	len = skb->len;
+	ret = dev_queue_xmit(skb);
+
+	if (likely(ret == NET_XMIT_SUCCESS)) {
+		txq->tx_packets++;
+		txq->tx_bytes += len;
+	} else
+		txq->tx_dropped++;
+
 	return NETDEV_TX_OK;
 }
 