author		Breno Leitao <leitao@linux.vnet.ibm.com>	2010-10-27 04:45:14 -0400
committer	David S. Miller <davem@davemloft.net>	2010-10-27 17:24:13 -0400
commit		ce45b873028fdf94a24f0850cd554e6fda593e16 (patch)
tree		ca3ff2670d7a8c12228bdc86d4d0d501facb05a0
parent		a71fb88145a03678fef3796930993e390db68a15 (diff)
ehea: Fixing statistics
(Applied over Eric's "ehea: fix use after free" patch)
Currently the ehea stats are broken: the byte counters are taken
from the hardware, while the packet counters are kept by the
device driver. Moreover, the driver counters are reset during the
down process while the hardware counters are not, so the reported
packet and byte totals drift apart and produce inconsistent numbers.
This patch consolidates both the packet and byte counters in the
device driver.
Signed-off-by: Breno Leitao <leitao@linux.vnet.ibm.com>
Reviewed-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
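Before the patch, stats->rx_bytes and stats->tx_bytes came from the
hardware query block (cb2->rxo/cb2->txo) while the packet counts were
summed from per-queue driver counters; after it, all four values come
from the same per-queue software counters. A minimal standalone sketch
of that aggregation pattern, assuming simplified stand-in types
(queue_stats, dev_stats, NUM_QUEUES and the sample values are
illustrative, not the driver's):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_QUEUES 4

/* Per-queue software counters, mirroring what the patch adds to
 * struct ehea_port_res (tx_bytes/rx_bytes next to the existing
 * tx_packets/rx_packets). */
struct queue_stats {
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes;
};

/* Simplified stand-in for struct net_device_stats. */
struct dev_stats {
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes;
};

/* Sum every counter from the same per-queue software source, as the
 * patched ehea_get_stats() does; packet and byte totals can no longer
 * drift apart the way driver packet counts and hardware byte counts
 * did. */
static void get_stats(const struct queue_stats *q, struct dev_stats *s)
{
	memset(s, 0, sizeof(*s));
	for (int i = 0; i < NUM_QUEUES; i++) {
		s->tx_packets += q[i].tx_packets;
		s->tx_bytes   += q[i].tx_bytes;
		s->rx_packets += q[i].rx_packets;
		s->rx_bytes   += q[i].rx_bytes;
	}
}

int main(void)
{
	struct queue_stats q[NUM_QUEUES] = {
		{ .tx_packets = 10, .tx_bytes = 15000, .rx_packets = 7, .rx_bytes = 9000 },
		{ .tx_packets = 3,  .tx_bytes = 4500,  .rx_packets = 2, .rx_bytes = 3000 },
	};
	struct dev_stats s;

	get_stats(q, &s);
	printf("tx: %" PRIu64 " pkts / %" PRIu64 " bytes\n", s.tx_packets, s.tx_bytes);
	printf("rx: %" PRIu64 " pkts / %" PRIu64 " bytes\n", s.rx_packets, s.rx_bytes);
	return 0;
}

Because a single source feeds both counters, packets and bytes always
move together, and both can be saved and restored uniformly when a
port resource is torn down and rebuilt.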
-rw-r--r--	drivers/net/ehea/ehea.h      |  2 ++
-rw-r--r--	drivers/net/ehea/ehea_main.c | 32 ++++++++++++++++++++++++++------
2 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 1321cb6401cf..8e745e74828d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -396,7 +396,9 @@ struct ehea_port_res {
 	int swqe_ll_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
+	u64 tx_bytes;
 	u64 rx_packets;
+	u64 rx_bytes;
 	u32 poll_counter;
 	struct net_lro_mgr lro_mgr;
 	struct net_lro_desc lro_desc[MAX_LRO_DESCRIPTORS];
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index e59d38629099..182b2a7be8dc 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -330,7 +330,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets, tx_packets;
+	u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -353,18 +353,22 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");
 
 	rx_packets = 0;
-	for (i = 0; i < port->num_def_qps; i++)
+	for (i = 0; i < port->num_def_qps; i++) {
 		rx_packets += port->port_res[i].rx_packets;
+		rx_bytes   += port->port_res[i].rx_bytes;
+	}
 
 	tx_packets = 0;
-	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		tx_packets += port->port_res[i].tx_packets;
+		tx_bytes   += port->port_res[i].tx_bytes;
+	}
 
 	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
-	stats->rx_bytes = cb2->rxo;
-	stats->tx_bytes = cb2->txo;
+	stats->rx_bytes = rx_bytes;
+	stats->tx_bytes = tx_bytes;
 	stats->rx_packets = rx_packets;
 
 out_herr:
@@ -703,6 +707,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 	int skb_arr_rq2_len = pr->rq2_skba.len;
 	int skb_arr_rq3_len = pr->rq3_skba.len;
 	int processed, processed_rq1, processed_rq2, processed_rq3;
+	u64 processed_bytes = 0;
 	int wqe_index, last_wqe_index, rq, port_reset;
 
 	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
@@ -760,6 +765,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 				processed_rq3++;
 			}
 
+			processed_bytes += skb->len;
 			ehea_proc_skb(pr, cqe, skb);
 		} else {
 			pr->p_stats.poll_receive_errors++;
@@ -775,6 +781,7 @@ static int ehea_proc_rwqes(struct net_device *dev,
 		lro_flush_all(&pr->lro_mgr);
 
 	pr->rx_packets += processed;
+	pr->rx_bytes += processed_bytes;
 
 	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
 	ehea_refill_rq2(pr, processed_rq2);
@@ -1509,9 +1516,20 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	enum ehea_eq_type eq_type = EHEA_EQ;
 	struct ehea_qp_init_attr *init_attr = NULL;
 	int ret = -EIO;
+	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;
+
+	tx_bytes = pr->tx_bytes;
+	tx_packets = pr->tx_packets;
+	rx_bytes = pr->rx_bytes;
+	rx_packets = pr->rx_packets;
 
 	memset(pr, 0, sizeof(struct ehea_port_res));
 
+	pr->tx_bytes = tx_bytes;
+	pr->tx_packets = tx_packets;
+	pr->rx_bytes = rx_bytes;
+	pr->rx_packets = rx_packets;
+
 	pr->port = port;
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);
@@ -2254,6 +2272,9 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		swqe->vlan_tag = vlan_tx_tag_get(skb);
 	}
 
+	pr->tx_packets++;
+	pr->tx_bytes += skb->len;
+
 	if (skb->len <= SWQE3_MAX_IMM) {
 		u32 sig_iv = port->sig_comp_iv;
 		u32 swqe_num = pr->swqe_id_counter;
@@ -2295,7 +2316,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
-	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
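The ehea_init_port_res() hunk above is the piece that makes the
counters survive the down process the commit message complains about:
the counters are read out before the memset() that wipes struct
ehea_port_res and written back afterwards. A minimal sketch of that
save-and-restore pattern, assuming a reduced stand-in structure
(port_res, queue_state and reinit_port_res are illustrative names,
not the driver's):

#include <stdint.h>
#include <string.h>

/* Reduced stand-in for struct ehea_port_res: just the traffic
 * counters plus a placeholder for the state that genuinely must be
 * re-initialised on every port reset. */
struct port_res {
	uint64_t tx_packets, tx_bytes;
	uint64_t rx_packets, rx_bytes;
	int queue_state;	/* placeholder for qp/cq/lock state */
};

/* Save the counters, wipe the structure, write the counters back.
 * Without the restore, each down/up cycle would zero the software
 * counters and produce the inconsistent numbers the commit message
 * describes. */
static void reinit_port_res(struct port_res *pr)
{
	uint64_t tx_bytes = pr->tx_bytes, tx_packets = pr->tx_packets;
	uint64_t rx_bytes = pr->rx_bytes, rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(*pr));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	/* ... the rest of the (re)initialisation would follow ... */
}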