author    Roland Dreier <rolandd@cisco.com>  2010-04-28 17:57:40 -0400
committer Roland Dreier <rolandd@cisco.com>  2010-04-28 17:57:40 -0400
commit    617c9a7e398878d036a3aa9a063ccba145854b45 (patch)
tree      77e7e221467f9c2baae82f1a152e16f2b6b59302
parent    73a203d2014f50d874b9e40083ad481ca70408e8 (diff)
RDMA/cxgb3: Shrink .text with compile-time init of handlers arrays
Using compile-time designated initializers for the handler arrays
instead of open-coding the initialization in iwch_cm_init() is (IMHO)
cleaner, and leads to substantially smaller code: on my x86-64 build,
bloat-o-meter shows:

add/remove: 0/1 grow/shrink: 4/3 up/down: 4/-1682 (-1678)
function                     old     new   delta
tx_ack                       167     168      +1
state_set                     55      56      +1
start_ep_timer                99     100      +1
pass_establish               177     178      +1
act_open_req_arp_failure      39      38      -1
sched                         84      82      -2
iwch_cm_init                 442      91    -351
work_handlers               1328       -   -1328

Signed-off-by: Roland Dreier <rolandd@cisco.com>
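[Editor's illustration] The mechanism behind the savings: a dispatch table
filled in with C99 designated initializers is emitted as initialized data at
build time, while open-coded assignments in an init function compile to
instructions in .text. A minimal standalone sketch of the pattern (the enum,
handler names, and table size below are invented for illustration, not taken
from cxgb3):

	#include <stdio.h>

	#define NUM_CMDS 4
	enum { CMD_OPEN, CMD_CLOSE, CMD_DATA, CMD_ACK };

	typedef int (*handler_func)(int arg);

	static int handle_open(int arg)  { return printf("open %d\n", arg); }
	static int handle_close(int arg) { return printf("close %d\n", arg); }
	static int handle_data(int arg)  { return printf("data %d\n", arg); }

	/*
	 * Designated initializers: the table is laid out at compile time
	 * and lives in initialized data, so no .text is spent filling it
	 * in at run time.  Unlisted slots (CMD_ACK here) are NULL.
	 */
	static const handler_func handlers[NUM_CMDS] = {
		[CMD_OPEN]  = handle_open,
		[CMD_CLOSE] = handle_close,
		[CMD_DATA]  = handle_data,
	};

	int main(void)
	{
		for (int op = 0; op < NUM_CMDS; op++)
			if (handlers[op])
				handlers[op](op);
		return 0;
	}

Marking the table const, as the patch does for work_handlers, additionally
lets it be placed in .rodata.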
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch.c    |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_cm.c | 129
2 files changed, 65 insertions(+), 66 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index 63f975f3e30f..8e77dc543dd1 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -47,8 +47,6 @@ MODULE_DESCRIPTION("Chelsio T3 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
-
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
 static void iwch_event_handler(struct t3cdev *, u32, u32);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index cfd6db019f1e..ebfb117ba68b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -102,12 +102,9 @@ static unsigned int cong_flavor = 1;
 module_param(cong_flavor, uint, 0644);
 MODULE_PARM_DESC(cong_flavor, "TCP Congestion control flavor (default=1)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
@@ -302,27 +299,6 @@ static void release_ep_resources(struct iwch_ep *ep)
 	put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	void *ep;
-	struct t3cdev *tdev;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		ep = *((void **) (skb->cb));
-		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
-		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
-		if (ret & CPL_RET_BUF_DONE)
-			kfree_skb(skb);
-
-		/*
-		 * ep was referenced in sched(), and is freed here.
-		 */
-		put_ep((struct iwch_ep_common *)ep);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -2157,7 +2133,49 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
 
 /*
  * All the CM events are handled on a work queue to have a safe context.
+ * These are the real handlers that are called from the work queue.
  */
+static const cxgb3_cpl_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_TX_DMA_ACK] = tx_ack,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_RDMA_EC_STATUS] = ec_status,
+};
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	void *ep;
+	struct t3cdev *tdev;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		ep = *((void **) (skb->cb));
+		tdev = *((struct t3cdev **) (skb->cb + sizeof(void *)));
+		ret = work_handlers[G_OPCODE(ntohl((__force __be32)skb->csum))](tdev, skb, ep);
+		if (ret & CPL_RET_BUF_DONE)
+			kfree_skb(skb);
+
+		/*
+		 * ep was referenced in sched(), and is freed here.
+		 */
+		put_ep((struct iwch_ep_common *)ep);
+	}
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
 static int sched(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 {
 	struct iwch_ep_common *epc = ctx;
@@ -2189,6 +2207,29 @@ static int set_tcb_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 	return CPL_RET_BUF_DONE;
 }
 
+/*
+ * All upcalls from the T3 Core go to sched() to schedule the
+ * processing on a work queue.
+ */
+cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_TX_DMA_ACK] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_RDMA_EC_STATUS] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+};
+
 int __init iwch_cm_init(void)
 {
 	skb_queue_head_init(&rxq);
@@ -2197,46 +2238,6 @@ int __init iwch_cm_init(void)
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * All upcalls from the T3 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	t3c_handlers[CPL_ACT_ESTABLISH] = sched;
-	t3c_handlers[CPL_ACT_OPEN_RPL] = sched;
-	t3c_handlers[CPL_RX_DATA] = sched;
-	t3c_handlers[CPL_TX_DMA_ACK] = sched;
-	t3c_handlers[CPL_ABORT_RPL_RSS] = sched;
-	t3c_handlers[CPL_ABORT_RPL] = sched;
-	t3c_handlers[CPL_PASS_OPEN_RPL] = sched;
-	t3c_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	t3c_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	t3c_handlers[CPL_PASS_ESTABLISH] = sched;
-	t3c_handlers[CPL_PEER_CLOSE] = sched;
-	t3c_handlers[CPL_CLOSE_CON_RPL] = sched;
-	t3c_handlers[CPL_ABORT_REQ_RSS] = sched;
-	t3c_handlers[CPL_RDMA_TERMINATE] = sched;
-	t3c_handlers[CPL_RDMA_EC_STATUS] = sched;
-	t3c_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_TX_DMA_ACK] = tx_ack;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_RDMA_EC_STATUS] = ec_status;
 	return 0;
 }
 
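[Editor's illustration] A side effect visible in the diff: with work_handlers
defined as a file-scope initialized array ahead of process_work(), and
DECLARE_WORK(skb_work, process_work) moved below the function it refers to,
the forward declarations at the top of the file become unnecessary and are
dropped. The diff also shows the skb->cb stashing contract shared by sched()
and process_work(): the producer packs two context pointers into the skb's
scratch area and the consumer unpacks them at the same offsets. A
hypothetical userspace analogue of that contract (plain C, all names
invented; not kernel code):

	#include <stdio.h>
	#include <string.h>

	struct msg {
		unsigned char cb[48];	/* scratch area, like skb->cb */
	};

	/* Producer side (like sched()): pack two context pointers. */
	static void stash_ctx(struct msg *m, void *ep, void *tdev)
	{
		memcpy(m->cb, &ep, sizeof(void *));
		memcpy(m->cb + sizeof(void *), &tdev, sizeof(void *));
	}

	int main(void)
	{
		int ep = 1, tdev = 2;
		struct msg m;

		stash_ctx(&m, &ep, &tdev);

		/* Consumer side (like process_work()): unpack at the same
		 * offsets, mirroring the driver's own cast style. */
		int *ep_out = *((int **) m.cb);
		int *tdev_out = *((int **) (m.cb + sizeof(void *)));

		printf("ep=%d tdev=%d\n", *ep_out, *tdev_out);
		return 0;
	}

The layout of the scratch area is an implicit agreement between the two
sides; in the driver the contract is simply that sched() writes and
process_work() reads the same offsets.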