Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--	drivers/net/ehea/ehea_main.c	20
1 file changed, 12 insertions(+), 8 deletions(-)
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index f0319f1e8e05..869e1604b16e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -136,7 +136,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	struct ehea_port *port = netdev_priv(dev);
 	struct net_device_stats *stats = &port->stats;
 	struct hcp_ehea_port_cb2 *cb2;
-	u64 hret, rx_packets;
+	u64 hret, rx_packets, tx_packets;
 	int i;
 
 	memset(stats, 0, sizeof(*stats));
@@ -162,7 +162,11 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
 	for (i = 0; i < port->num_def_qps; i++)
 		rx_packets += port->port_res[i].rx_packets;
 
-	stats->tx_packets = cb2->txucp + cb2->txmcp + cb2->txbcp;
+	tx_packets = 0;
+	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
+		tx_packets += port->port_res[i].tx_packets;
+
+	stats->tx_packets = tx_packets;
 	stats->multicast = cb2->rxmcp;
 	stats->rx_errors = cb2->rxuerr;
 	stats->rx_bytes = cb2->rxo;
@@ -406,11 +410,6 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
 		pr->p_stats.err_frame_crc++;
 
-	if (netif_msg_rx_err(pr->port)) {
-		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(cqe, sizeof(*cqe), "CQE");
-	}
-
 	if (rq == 2) {
 		*processed_rq2 += 1;
 		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
@@ -422,7 +421,11 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 	}
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
-		ehea_error("Critical receive error. Resetting port.");
+		if (netif_msg_rx_err(pr->port)) {
+			ehea_error("Critical receive error for QP %d. "
+				   "Resetting port.", pr->qp->init_attr.qp_nr);
+			ehea_dump(cqe, sizeof(*cqe), "CQE");
+		}
 		schedule_work(&pr->port->reset_task);
 		return 1;
 	}
@@ -2000,6 +2003,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
+	pr->tx_packets++;
 
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
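
Taken together, the stats hunks stop reporting the firmware counters (cb2->txucp, cb2->txmcp, cb2->txbcp) for TX and instead keep a software counter: ehea_start_xmit() increments a per-queue tx_packets, and ehea_get_stats() sums it across the default and additional TX queue pairs. A minimal standalone sketch of that aggregation pattern, using hypothetical stand-in structs rather than the real ehea definitions:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's struct ehea_port_res and
 * struct ehea_port; the real driver keeps one tx_packets counter per
 * queue pair (port_res). */
struct port_res {
	unsigned long long tx_packets;	/* bumped once per posted send WQE */
};

struct port {
	int num_def_qps;	/* default queue pairs */
	int num_add_tx_qps;	/* additional TX-only queue pairs */
	struct port_res port_res[16];
};

/* Sum the per-queue counters on demand, mirroring what the patched
 * ehea_get_stats() does instead of trusting the firmware totals. */
static unsigned long long total_tx_packets(const struct port *p)
{
	unsigned long long sum = 0;
	int i;

	for (i = 0; i < p->num_def_qps + p->num_add_tx_qps; i++)
		sum += p->port_res[i].tx_packets;
	return sum;
}

int main(void)
{
	struct port p = { .num_def_qps = 2, .num_add_tx_qps = 1 };

	p.port_res[0].tx_packets = 10;	/* default QP 0 */
	p.port_res[1].tx_packets = 5;	/* default QP 1 */
	p.port_res[2].tx_packets = 3;	/* extra TX QP */

	printf("tx_packets = %llu\n", total_tx_packets(&p));	/* prints 18 */
	return 0;
}

Counting in software this way also keeps the statistics consistent with the error-path change above: packets are counted where the driver actually posts them, rather than relying on what the hardware chooses to report.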