author    Jan-Bernd Themann <ossthema@de.ibm.com>    2007-03-23 12:18:53 -0400
committer Jeff Garzik <jeff@garzik.org>              2007-04-28 11:01:02 -0400
commit    acbddb591ba76bb20204fd6a407cb87d3f5f751e
tree      008f1965aea9567bfbaeb9f46ab71e44662fd6d7 /drivers/net/ehea
parent    144213d71ce8b2a1e0740dd25808143e9ace655a
ehea: removing unused functionality
This patch includes:
 - removal of unused fields in structs
 - ethtool statistics cleanup
 - removal of unused functionality from the send path

Signed-off-by: Jan-Bernd Themann <themann@de.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
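The statistics rework is the core of the change: the old single-queue
port_state snapshot and the hypervisor CB6 counter query are dropped, and
each port resource instead keeps its own small port_stats block that
ehea_get_ethtool_stats() sums across all queues at read time. Below is a
minimal standalone sketch of that aggregation pattern; struct port_stats and
the summing loop mirror the diff that follows, while the EHEA_MAX_PORT_RES
value and the main() harness are assumptions made only so the sketch
compiles on its own.

/* Sketch of the per-queue statistics aggregation introduced by this patch.
 * Not driver code: EHEA_MAX_PORT_RES is defined by the real driver; the
 * value here and main() exist only to make the example self-contained. */
#include <stdio.h>

#define EHEA_MAX_PORT_RES 16	/* assumed value for this sketch */

struct port_stats {
	int poll_receive_errors;
	int queue_stopped;
	int err_tcp_cksum;
	int err_ip_cksum;
	int err_frame_crc;
};

struct ehea_port_res {
	struct port_stats p_stats;
	/* remaining members elided */
};

struct ehea_port {
	struct ehea_port_res port_res[EHEA_MAX_PORT_RES];
};

int main(void)
{
	struct ehea_port port = { 0 };
	int k, tmp;

	/* pretend two queues saw TCP checksum errors */
	port.port_res[0].p_stats.err_tcp_cksum = 2;
	port.port_res[3].p_stats.err_tcp_cksum = 5;

	/* the idiom ehea_get_ethtool_stats() now uses: sum one counter
	 * over every port resource before reporting it */
	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
		tmp += port.port_res[k].p_stats.err_tcp_cksum;

	printf("TCP cksum errors: %d\n", tmp);	/* prints 7 */
	return 0;
}

On the send side the cleanup is equally simple: every SWQE2 now sets
EHEA_SWQE_SIGNALLED_COMPLETION and encodes EHEA_WR_ID_REFILL as 1, so the
completion handler frees exactly one skb per CQE and the batched
ehea_free_sent_skbs() walk becomes unnecessary.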
Diffstat (limited to 'drivers/net/ehea')
-rw-r--r--  drivers/net/ehea/ehea.h          25
-rw-r--r--  drivers/net/ehea/ehea_ethtool.c 111
-rw-r--r--  drivers/net/ehea/ehea_main.c     55
-rw-r--r--  drivers/net/ehea/ehea_qmr.h       2
4 files changed, 69 insertions(+), 124 deletions(-)
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index f8899339baa0..1405d0b0b7e7 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
 #include <asm/io.h>

 #define DRV_NAME "ehea"
-#define DRV_VERSION "EHEA_0054"
+#define DRV_VERSION "EHEA_0055"

 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -79,7 +79,6 @@
 #define EHEA_L_PKT_SIZE 256 /* low latency */

 /* Send completion signaling */
-#define EHEA_SIG_IV_LONG 1

 /* Protection Domain Identifier */
 #define EHEA_PD_ID 0xaabcdeff
@@ -106,11 +105,7 @@
 #define EHEA_CACHE_LINE 128

 /* Memory Regions */
-#define EHEA_MR_MAX_TX_PAGES 20
-#define EHEA_MR_TX_DATA_PN 3
 #define EHEA_MR_ACC_CTRL 0x00800000
-#define EHEA_RWQES_PER_MR_RQ2 10
-#define EHEA_RWQES_PER_MR_RQ3 10

 #define EHEA_WATCH_DOG_TIMEOUT 10*HZ

@@ -318,17 +313,12 @@ struct ehea_mr {
 /*
  * Port state information
  */
-struct port_state {
-	int poll_max_processed;
+struct port_stats {
 	int poll_receive_errors;
-	int ehea_poll;
 	int queue_stopped;
-	int min_swqe_avail;
-	u64 sqc_stop_sum;
-	int pkt_send;
-	int pkt_xmit;
-	int send_tasklet;
-	int nwqe;
+	int err_tcp_cksum;
+	int err_ip_cksum;
+	int err_frame_crc;
 };

 #define EHEA_IRQ_NAME_SIZE 20
@@ -347,6 +337,7 @@ struct ehea_q_skb_arr {
  * Port resources
  */
 struct ehea_port_res {
+	struct port_stats p_stats;
 	struct ehea_mr send_mr;		/* send memory region */
 	struct ehea_mr recv_mr;		/* receive memory region */
 	spinlock_t xmit_lock;
@@ -358,7 +349,6 @@ struct ehea_port_res {
 	struct ehea_cq *recv_cq;
 	struct ehea_eq *eq;
 	struct net_device *d_netdev;
-	spinlock_t send_lock;
 	struct ehea_q_skb_arr rq1_skba;
 	struct ehea_q_skb_arr rq2_skba;
 	struct ehea_q_skb_arr rq3_skba;
@@ -368,11 +358,8 @@ struct ehea_port_res {
 	int swqe_refill_th;
 	atomic_t swqe_avail;
 	int swqe_ll_count;
-	int swqe_count;
 	u32 swqe_id_counter;
 	u64 tx_packets;
-	spinlock_t recv_lock;
-	struct port_state p_state;
 	u64 rx_packets;
 	u32 poll_counter;
 };
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 19950273ceb9..decec8cfe96b 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -166,33 +166,23 @@ static u32 ehea_get_rx_csum(struct net_device *dev)
 }

 static char ehea_ethtool_stats_keys[][ETH_GSTRING_LEN] = {
-	{"poll_max_processed"},
-	{"queue_stopped"},
-	{"min_swqe_avail"},
-	{"poll_receive_err"},
-	{"pkt_send"},
-	{"pkt_xmit"},
-	{"send_tasklet"},
-	{"ehea_poll"},
-	{"nwqe"},
-	{"swqe_available_0"},
 	{"sig_comp_iv"},
 	{"swqe_refill_th"},
 	{"port resets"},
-	{"rxo"},
-	{"rx64"},
-	{"rx65"},
-	{"rx128"},
-	{"rx256"},
-	{"rx512"},
-	{"rx1024"},
-	{"txo"},
-	{"tx64"},
-	{"tx65"},
-	{"tx128"},
-	{"tx256"},
-	{"tx512"},
-	{"tx1024"},
+	{"Receive errors"},
+	{"TCP cksum errors"},
+	{"IP cksum errors"},
+	{"Frame cksum errors"},
+	{"num SQ stopped"},
+	{"SQ stopped"},
+	{"PR0 free_swqes"},
+	{"PR1 free_swqes"},
+	{"PR2 free_swqes"},
+	{"PR3 free_swqes"},
+	{"PR4 free_swqes"},
+	{"PR5 free_swqes"},
+	{"PR6 free_swqes"},
+	{"PR7 free_swqes"},
 };

 static void ehea_get_strings(struct net_device *dev, u32 stringset, u8 *data)
@@ -211,63 +201,44 @@ static int ehea_get_stats_count(struct net_device *dev)
 static void ehea_get_ethtool_stats(struct net_device *dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
-	u64 hret;
-	int i;
+	int i, k, tmp;
 	struct ehea_port *port = netdev_priv(dev);
-	struct ehea_adapter *adapter = port->adapter;
-	struct ehea_port_res *pr = &port->port_res[0];
-	struct port_state *p_state = &pr->p_state;
-	struct hcp_ehea_port_cb6 *cb6;

 	for (i = 0; i < ehea_get_stats_count(dev); i++)
 		data[i] = 0;
-
 	i = 0;

-	data[i++] = p_state->poll_max_processed;
-	data[i++] = p_state->queue_stopped;
-	data[i++] = p_state->min_swqe_avail;
-	data[i++] = p_state->poll_receive_errors;
-	data[i++] = p_state->pkt_send;
-	data[i++] = p_state->pkt_xmit;
-	data[i++] = p_state->send_tasklet;
-	data[i++] = p_state->ehea_poll;
-	data[i++] = p_state->nwqe;
-	data[i++] = atomic_read(&port->port_res[0].swqe_avail);
 	data[i++] = port->sig_comp_iv;
 	data[i++] = port->port_res[0].swqe_refill_th;
 	data[i++] = port->resets;

-	cb6 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!cb6) {
-		ehea_error("no mem for cb6");
-		return;
-	}
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.poll_receive_errors;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_tcp_cksum;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_ip_cksum;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.err_frame_crc;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp += port->port_res[k].p_stats.queue_stopped;
+	data[i++] = tmp;
+
+	for (k = 0, tmp = 0; k < EHEA_MAX_PORT_RES; k++)
+		tmp |= port->port_res[k].queue_stopped;
+	data[i++] = tmp;
+
+	for (k = 0; k < 8; k++)
+		data[i++] = atomic_read(&port->port_res[k].swqe_avail);

-	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
-				      H_PORT_CB6, H_PORT_CB6_ALL, cb6);
-	if (netif_msg_hw(port))
-		ehea_dump(cb6, sizeof(*cb6), "ehea_get_ethtool_stats");
-
-	if (hret == H_SUCCESS) {
-		data[i++] = cb6->rxo;
-		data[i++] = cb6->rx64;
-		data[i++] = cb6->rx65;
-		data[i++] = cb6->rx128;
-		data[i++] = cb6->rx256;
-		data[i++] = cb6->rx512;
-		data[i++] = cb6->rx1024;
-		data[i++] = cb6->txo;
-		data[i++] = cb6->tx64;
-		data[i++] = cb6->tx65;
-		data[i++] = cb6->tx128;
-		data[i++] = cb6->tx256;
-		data[i++] = cb6->tx512;
-		data[i++] = cb6->tx1024;
-	} else
-		ehea_error("query_ehea_port failed");
-
-	kfree(cb6);
 }

 const struct ethtool_ops ehea_ethtool_ops = {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 8bceb4e6bb82..e6fe2cfbd999 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -327,6 +327,13 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
 {
 	struct sk_buff *skb;

+	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
+		pr->p_stats.err_tcp_cksum++;
+	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
+		pr->p_stats.err_ip_cksum++;
+	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
+		pr->p_stats.err_frame_crc++;
+
 	if (netif_msg_rx_err(pr->port)) {
 		ehea_error("CQE Error for QP %d", pr->qp->init_attr.qp_nr);
 		ehea_dump(cqe, sizeof(*cqe), "CQE");
@@ -428,7 +435,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 			else
 				netif_receive_skb(skb);
 		} else {
-			pr->p_state.poll_receive_errors++;
+			pr->p_stats.poll_receive_errors++;
 			port_reset = ehea_treat_poll_error(pr, rq, cqe,
 							   &processed_rq2,
 							   &processed_rq3);
@@ -449,34 +456,15 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
 	return cqe;
 }

-static void ehea_free_sent_skbs(struct ehea_cqe *cqe, struct ehea_port_res *pr)
-{
-	struct sk_buff *skb;
-	int index, max_index_mask, i;
-
-	index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
-	max_index_mask = pr->sq_skba.len - 1;
-	for (i = 0; i < EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id); i++) {
-		skb = pr->sq_skba.arr[index];
-		if (likely(skb)) {
-			dev_kfree_skb(skb);
-			pr->sq_skba.arr[index] = NULL;
-		} else {
-			ehea_error("skb=NULL, wr_id=%lX, loop=%d, index=%d",
-				   cqe->wr_id, i, index);
-		}
-		index--;
-		index &= max_index_mask;
-	}
-}
-
 static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 {
+	struct sk_buff *skb;
 	struct ehea_cq *send_cq = pr->send_cq;
 	struct ehea_cqe *cqe;
 	int quota = my_quota;
 	int cqe_counter = 0;
 	int swqe_av = 0;
+	int index;
 	unsigned long flags;

 	cqe = ehea_poll_cq(send_cq);
@@ -498,8 +486,13 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
 			ehea_dump(cqe, sizeof(*cqe), "CQE");

 		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
-			   == EHEA_SWQE2_TYPE))
-			ehea_free_sent_skbs(cqe, pr);
+			   == EHEA_SWQE2_TYPE)) {
+
+			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
+			skb = pr->sq_skba.arr[index];
+			dev_kfree_skb(skb);
+			pr->sq_skba.arr[index] = NULL;
+		}

 		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
 		quota--;
@@ -1092,8 +1085,6 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
 	memset(pr, 0, sizeof(struct ehea_port_res));

 	pr->port = port;
-	spin_lock_init(&pr->send_lock);
-	spin_lock_init(&pr->recv_lock);
 	spin_lock_init(&pr->xmit_lock);
 	spin_lock_init(&pr->netif_queue);

@@ -1811,7 +1802,6 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)

 	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

-
 	if (!spin_trylock(&pr->xmit_lock))
 		return NETDEV_TX_BUSY;

@@ -1841,6 +1831,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		swqe->wr_id =
 			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
 			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
+			| EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
 			| EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
 		pr->sq_skba.arr[pr->sq_skba.index] = skb;

@@ -1849,14 +1840,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)

 		lkey = pr->send_mr.lkey;
 		ehea_xmit2(skb, dev, swqe, lkey);
-
-		if (pr->swqe_count >= (EHEA_SIG_IV_LONG - 1)) {
-			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
-						      EHEA_SIG_IV_LONG);
-			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
-			pr->swqe_count = 0;
-		} else
-			pr->swqe_count += 1;
+		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
 	}
 	pr->swqe_id_counter += 1;

@@ -1876,6 +1860,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
 		spin_lock_irqsave(&pr->netif_queue, flags);
 		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
+			pr->p_stats.queue_stopped++;
 			netif_stop_queue(dev);
 			pr->queue_stopped = 1;
 		}
diff --git a/drivers/net/ehea/ehea_qmr.h b/drivers/net/ehea/ehea_qmr.h
index 24603312eb84..c0eb3e03a102 100644
--- a/drivers/net/ehea/ehea_qmr.h
+++ b/drivers/net/ehea/ehea_qmr.h
@@ -142,6 +142,8 @@ struct ehea_rwqe {
 #define EHEA_CQE_STAT_ERR_MASK 0x721F
 #define EHEA_CQE_STAT_FAT_ERR_MASK 0x1F
 #define EHEA_CQE_STAT_ERR_TCP 0x4000
+#define EHEA_CQE_STAT_ERR_IP 0x2000
+#define EHEA_CQE_STAT_ERR_CRC 0x1000

 struct ehea_cqe {
 	u64 wr_id;	/* work request ID from WQE */