| author | Eric Dumazet <eric.dumazet@gmail.com> | 2011-01-12 07:13:14 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2011-01-14 00:44:34 -0500 |
| commit | 1ac9ad1394fa542ac7ae0dc943ee3cda678799fa (patch) | |
| tree | d846be421ed68f4fc612ae7c061783dab73d5fa2 /net/sched/sch_teql.c | |
| parent | 1949e084bfd143c76e22c0b37f370d6e7bf4bfdd (diff) | |
net: remove dev_txq_stats_fold()
After recent changes (percpu stats on vlan/tunnels, ...), we no longer need the per-struct netdev_queue tx_bytes/tx_packets/tx_dropped counters.
The only remaining users are ixgbe, sch_teql, gianfar and macvlan:
1) ixgbe can be converted to use its existing tx_ring counters.
2) macvlan incremented txq->tx_dropped; it can use the dev->stats.tx_dropped counter instead.
3) sch_teql: almost a revert of ab35cd4b8f42 ("Use net_device internal stats"). Now that we have ndo_get_stats64(), use it, even for "unsigned long" fields (no need to bring back a struct net_device_stats). A minimal sketch of this pattern is shown after this list.
4) gianfar adds a stats structure per tx queue to hold tx_bytes/tx_packets.
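As referenced in (3) above, here is a minimal, hypothetical sketch of the ndo_get_stats64 pattern: a driver keeps its own tx counters in private state and folds them into the rtnl_link_stats64 supplied by the core. The "foo" names are illustrative only (not taken from teql, ixgbe, gianfar or macvlan), and this is a sketch against the kernel APIs of this series, not any driver's actual code.

```c
#include <linux/netdevice.h>

/* Hypothetical private state: per-device tx counters, updated on the xmit path. */
struct foo_priv {
	unsigned long tx_packets;
	unsigned long tx_bytes;
	unsigned long tx_errors;
	unsigned long tx_dropped;
};

/* Fold the driver-private counters into the structure the core hands us.
 * With this hook present, dev_get_stats() no longer needs a generic
 * per-txq fold such as dev_txq_stats_fold().
 */
static struct rtnl_link_stats64 *foo_get_stats64(struct net_device *dev,
						 struct rtnl_link_stats64 *stats)
{
	struct foo_priv *priv = netdev_priv(dev);

	stats->tx_packets = priv->tx_packets;
	stats->tx_bytes   = priv->tx_bytes;
	stats->tx_errors  = priv->tx_errors;
	stats->tx_dropped = priv->tx_dropped;
	return stats;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_get_stats64 = foo_get_stats64,
	/* .ndo_open, .ndo_start_xmit, ... */
};
```

The sch_teql hunk below follows exactly this shape, with the counters living in struct teql_master; gianfar's variant keeps a stats structure per tx queue and sums the queues in its handler.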
This removes a lockdep warning (and possible lockup) in the rndis gadget, which calls dev_get_stats() from hard IRQ context.
Ref: http://www.spinics.net/lists/netdev/msg149202.html
Reported-by: Neil Jones <neiljay@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
CC: Jarek Poplawski <jarkao2@gmail.com>
CC: Alexander Duyck <alexander.h.duyck@intel.com>
CC: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
CC: Sandeep Gopalpet <sandeep.kumar@freescale.com>
CC: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched/sch_teql.c')
-rw-r--r-- | net/sched/sch_teql.c | 26 |
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index af9360d1f6eb..84ce48eadff4 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -59,6 +59,10 @@ struct teql_master
 	struct net_device *dev;
 	struct Qdisc *slaves;
 	struct list_head master_list;
+	unsigned long	tx_bytes;
+	unsigned long	tx_packets;
+	unsigned long	tx_errors;
+	unsigned long	tx_dropped;
 };
 
 struct teql_sched_data
@@ -274,7 +278,6 @@ static inline int teql_resolve(struct sk_buff *skb,
 static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct teql_master *master = netdev_priv(dev);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
@@ -314,8 +317,8 @@ restart:
 					__netif_tx_unlock(slave_txq);
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
-					txq->tx_packets++;
-					txq->tx_bytes += length;
+					master->tx_packets++;
+					master->tx_bytes += length;
 					return NETDEV_TX_OK;
 				}
 				__netif_tx_unlock(slave_txq);
@@ -342,10 +345,10 @@ restart:
 		netif_stop_queue(dev);
 		return NETDEV_TX_BUSY;
 	}
-	dev->stats.tx_errors++;
+	master->tx_errors++;
 
 drop:
-	txq->tx_dropped++;
+	master->tx_dropped++;
 	dev_kfree_skb(skb);
 	return NETDEV_TX_OK;
 }
@@ -398,6 +401,18 @@ static int teql_master_close(struct net_device *dev)
 	return 0;
 }
 
+static struct rtnl_link_stats64 *teql_master_stats64(struct net_device *dev,
+						     struct rtnl_link_stats64 *stats)
+{
+	struct teql_master *m = netdev_priv(dev);
+
+	stats->tx_packets = m->tx_packets;
+	stats->tx_bytes   = m->tx_bytes;
+	stats->tx_errors  = m->tx_errors;
+	stats->tx_dropped = m->tx_dropped;
+	return stats;
+}
+
 static int teql_master_mtu(struct net_device *dev, int new_mtu)
 {
 	struct teql_master *m = netdev_priv(dev);
@@ -422,6 +437,7 @@ static const struct net_device_ops teql_netdev_ops = {
 	.ndo_open	= teql_master_open,
 	.ndo_stop	= teql_master_close,
 	.ndo_start_xmit	= teql_master_xmit,
+	.ndo_get_stats64 = teql_master_stats64,
 	.ndo_change_mtu	= teql_master_mtu,
 };
 
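Follow-up note (not part of the patch): once .ndo_get_stats64 is registered, the core's dev_get_stats() prefers it over the legacy stats paths, so readers such as the sysfs statistics files or `ip -s link` pick up the counters that teql_master_stats64() fills in. A hedged sketch of how a caller in this kernel series retrieves them; the helper name foo_read_tx_bytes() is hypothetical:

```c
#include <linux/netdevice.h>

/* Hypothetical reader: dev_get_stats() fills 'temp' through the device's
 * ndo_get_stats64 hook (teql_master_stats64() for a teql master device).
 */
static u64 foo_read_tx_bytes(struct net_device *dev)
{
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *stats = dev_get_stats(dev, &temp);

	return stats->tx_bytes;
}
```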