aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ehea/ehea_main.c
diff options
context:
space:
mode:
authorJan-Bernd Themann <ossthema@de.ibm.com>2007-03-23 12:18:53 -0400
committerJeff Garzik <jeff@garzik.org>2007-04-28 11:01:02 -0400
commitacbddb591ba76bb20204fd6a407cb87d3f5f751e (patch)
tree008f1965aea9567bfbaeb9f46ab71e44662fd6d7 /drivers/net/ehea/ehea_main.c
parent144213d71ce8b2a1e0740dd25808143e9ace655a (diff)
ehea: removing unused functionality
This patch includes: - removal of unused fields in structs - ethtool statistics cleanup - removes unused functionality from send path Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com> Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/ehea/ehea_main.c')
-rw-r--r--drivers/net/ehea/ehea_main.c55
1 files changed, 20 insertions, 35 deletions
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 8bceb4e6bb82..e6fe2cfbd999 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -327,6 +327,13 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
327{ 327{
328 struct sk_buff *skb; 328 struct sk_buff *skb;
329 329
330 if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
331 pr->p_stats.err_tcp_cksum++;
332 if (cqe->status & EHEA_CQE_STAT_ERR_IP)
333 pr->p_stats.err_ip_cksum++;
334 if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
335 pr->p_stats.err_frame_crc++;
336
330 if (netif_msg_rx_err(pr->port)) { 337 if (netif_msg_rx_err(pr->port)) {
331 ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr); 338 ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
332 ehea_dump(cqe, sizeof(*cqe), "CQE"); 339 ehea_dump(cqe, sizeof(*cqe), "CQE");
@@ -428,7 +435,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
428 else 435 else
429 netif_receive_skb(skb); 436 netif_receive_skb(skb);
430 } else { 437 } else {
431 pr->p_state.poll_receive_errors++; 438 pr->p_stats.poll_receive_errors++;
432 port_reset = ehea_treat_poll_error(pr, rq, cqe, 439 port_reset = ehea_treat_poll_error(pr, rq, cqe,
433 &processed_rq2, 440 &processed_rq2,
434 &processed_rq3); 441 &processed_rq3);
@@ -449,34 +456,15 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
449 return cqe; 456 return cqe;
450} 457}
451 458
452static void ehea_free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
453{
454 struct sk_buff *skb;
455 int index, max_index_mask, i;
456
457 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
458 max_index_mask = pr->sq_skba.len - 1;
459 for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
460 skb = pr->sq_skba.arr[index];
461 if (likely(skb)) {
462 dev_kfree_skb(skb);
463 pr->sq_skba.arr[index] = NULL;
464 } else {
465 ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
466 cqe->wr_id, i, index);
467 }
468 index--;
469 index &= max_index_mask;
470 }
471}
472
473static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota) 459static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
474{ 460{
461 struct sk_buff *skb;
475 struct ehea_cq *send_cq = pr->send_cq; 462 struct ehea_cq *send_cq = pr->send_cq;
476 struct ehea_cqe *cqe; 463 struct ehea_cqe *cqe;
477 int quota = my_quota; 464 int quota = my_quota;
478 int cqe_counter = 0; 465 int cqe_counter = 0;
479 int swqe_av = 0; 466 int swqe_av = 0;
467 int index;
480 unsigned long flags; 468 unsigned long flags;
481 469
482 cqe = ehea_poll_cq(send_cq); 470 cqe = ehea_poll_cq(send_cq);
@@ -498,8 +486,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
498 ehea_dump(cqe, sizeof(*cqe), "CQE"); 486 ehea_dump(cqe, sizeof(*cqe), "CQE");
499 487
500 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id) 488 if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
501 == EHEA_SWQE2_TYPE)) 489 == EHEA_SWQE2_TYPE)) {
502 ehea_free_sent_skbs(cqe, pr); 490
491 index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
492 skb = pr->sq_skba.arr[index];
493 dev_kfree_skb(skb);
494 pr->sq_skba.arr[index] = NULL;
495 }
503 496
504 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); 497 swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
505 quota--; 498 quota--;
@@ -1092,8 +1085,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1092 memset(pr, 0, sizeof(struct ehea_port_res)); 1085 memset(pr, 0, sizeof(struct ehea_port_res));
1093 1086
1094 pr->port = port; 1087 pr->port = port;
1095 spin_lock_init(&pr->send_lock);
1096 spin_lock_init(&pr->recv_lock);
1097 spin_lock_init(&pr->xmit_lock); 1088 spin_lock_init(&pr->xmit_lock);
1098 spin_lock_init(&pr->netif_queue); 1089 spin_lock_init(&pr->netif_queue);
1099 1090
@@ -1811,7 +1802,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1811 1802
1812 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)]; 1803 pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];
1813 1804
1814
1815 if (!spin_trylock(&pr->xmit_lock)) 1805 if (!spin_trylock(&pr->xmit_lock))
1816 return NETDEV_TX_BUSY; 1806 return NETDEV_TX_BUSY;
1817 1807
@@ -1841,6 +1831,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1841 swqe->wr_id = 1831 swqe->wr_id =
1842 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE) 1832 EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
1843 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter) 1833 | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
1834 | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
1844 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index); 1835 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
1845 pr->sq_skba.arr[pr->sq_skba.index] = skb; 1836 pr->sq_skba.arr[pr->sq_skba.index] = skb;
1846 1837
@@ -1849,14 +1840,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1849 1840
1850 lkey = pr->send_mr.lkey; 1841 lkey = pr->send_mr.lkey;
1851 ehea_xmit2(skb, dev, swqe, lkey); 1842 ehea_xmit2(skb, dev, swqe, lkey);
1852 1843 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1853 if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
1854 swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
1855 EHEA_SIG_IV_LONG);
1856 swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
1857 pr->swqe_count = 0;
1858 } else
1859 pr->swqe_count += 1;
1860 } 1844 }
1861 pr->swqe_id_counter += 1; 1845 pr->swqe_id_counter += 1;
1862 1846
@@ -1876,6 +1860,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
1876 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 1860 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1877 spin_lock_irqsave(&pr->netif_queue, flags); 1861 spin_lock_irqsave(&pr->netif_queue, flags);
1878 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) { 1862 if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
1863 pr->p_stats.queue_stopped++;
1879 netif_stop_queue(dev); 1864 netif_stop_queue(dev);
1880 pr->queue_stopped = 1; 1865 pr->queue_stopped = 1;
1881 } 1866 }