author    Roland Dreier <rolandd@cisco.com>  2010-05-05 17:45:40 -0400
committer Roland Dreier <rolandd@cisco.com>  2010-05-05 17:45:40 -0400
commit    be4c9bad9d0edb6bc3bd8fffc2f98e0e2112da39
tree      1fbe204cb8f386e35581bd9fa8ea835950b076c4
parent    cfdda9d764362ab77b11a410bb928400e6520d57

MAINTAINERS: Add cxgb4 and iw_cxgb4 entries

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cm.c')
 drivers/infiniband/hw/cxgb4/cm.c | 316
 1 file changed, 180 insertions(+), 136 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 07b068be0cfa..30ce0a8eca09 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@ static char *states[] = {
 	NULL,
 };
 
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
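
The new c4iw_max_read_depth parameter replaces the hard-coded T4_MAX_READ_DEPTH bound that c4iw_accept_cr() checked, and the same bound is now also enforced in c4iw_connect() (see the hunks further down). Because module_param() is given mode 0644, the value can presumably be set at module load time (e.g. modprobe iw_cxgb4 c4iw_max_read_depth=16) or inspected and tuned afterwards through /sys/module/iw_cxgb4/parameters/c4iw_max_read_depth; the module name and example value here are illustrative rather than taken from the patch.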
@@ -113,18 +117,17 @@ static int snd_win = 32 * 1024;
 module_param(snd_win, int, 0644);
 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
-c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p\n", __func__, ep);
@@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep)
 	c4iw_put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-	struct sk_buff *skb = NULL;
-	struct c4iw_dev *dev;
-	struct cpl_act_establish *rpl = cplhdr(skb);
-	unsigned int opcode;
-	int ret;
-
-	while ((skb = skb_dequeue(&rxq))) {
-		rpl = cplhdr(skb);
-		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
-		opcode = rpl->ot.opcode;
-
-		BUG_ON(!work_handlers[opcode]);
-		ret = work_handlers[opcode](dev, skb);
-		if (!ret)
-			kfree_skb(skb);
-	}
-}
-
 static int status2errno(int status)
 {
 	switch (status) {
@@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
-static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
-{
-	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	struct c4iw_wr_wait *wr_waitp;
-	int ret;
-
-	PDBG("%s type %u\n", __func__, rpl->type);
-
-	switch (rpl->type) {
-	case 1:
-		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
-		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			wr_waitp->ret = ret;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
-		break;
-	case 2:
-		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
-		break;
-	default:
-		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-		       rpl->type);
-		break;
-	}
-	return 0;
-}
-
-static void ep_timeout(unsigned long arg)
-{
-	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
-	struct c4iw_qp_attributes attrs;
-	unsigned long flags;
-	int abort = 1;
-
-	spin_lock_irqsave(&ep->com.lock, flags);
-	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-	     ep->com.state);
-	switch (ep->com.state) {
-	case MPA_REQ_SENT:
-		__state_set(&ep->com, ABORTING);
-		connect_reply_upcall(ep, -ETIMEDOUT);
-		break;
-	case MPA_REQ_WAIT:
-		__state_set(&ep->com, ABORTING);
-		break;
-	case CLOSING:
-	case MORIBUND:
-		if (ep->com.cm_id && ep->com.qp) {
-			attrs.next_state = C4IW_QP_STATE_ERROR;
-			c4iw_modify_qp(ep->com.qp->rhp,
-				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
-				     &attrs, 1);
-		}
-		__state_set(&ep->com, ABORTING);
-		break;
-	default:
-		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
-			__func__, ep, ep->hwtid, ep->com.state);
-		WARN_ON(1);
-		abort = 0;
-	}
-	spin_unlock_irqrestore(&ep->com.lock, flags);
-	if (abort)
-		abort_connection(ep, NULL, GFP_ATOMIC);
-	c4iw_put_ep(&ep->com);
-}
-
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
 	int err;
@@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
 
-	if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
-	    (conn_param->ird > T4_MAX_READ_DEPTH)) {
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
 		abort_connection(ep, NULL, GFP_KERNEL);
 		err = -EINVAL;
 		goto err;
@@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	struct net_device *pdev;
 	int step;
 
+	if ((conn_param->ord > c4iw_max_read_depth) ||
+	    (conn_param->ird > c4iw_max_read_depth)) {
+		err = -EINVAL;
+		goto out;
+	}
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
 	if (!ep) {
 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	 */
 	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
 	if (ep->stid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
@@ -2244,6 +2162,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 }
 
 /*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = act_establish,
+	[CPL_ACT_OPEN_RPL] = act_open_rpl,
+	[CPL_RX_DATA] = rx_data,
+	[CPL_ABORT_RPL_RSS] = abort_rpl,
+	[CPL_ABORT_RPL] = abort_rpl,
+	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
+	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+	[CPL_PASS_ESTABLISH] = pass_establish,
+	[CPL_PEER_CLOSE] = peer_close,
+	[CPL_ABORT_REQ_RSS] = peer_abort,
+	[CPL_CLOSE_CON_RPL] = close_con_rpl,
+	[CPL_RDMA_TERMINATE] = terminate,
+	[CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+	struct c4iw_qp_attributes attrs;
+	int abort = 1;
+
+	spin_lock_irq(&ep->com.lock);
+	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+	     ep->com.state);
+	switch (ep->com.state) {
+	case MPA_REQ_SENT:
+		__state_set(&ep->com, ABORTING);
+		connect_reply_upcall(ep, -ETIMEDOUT);
+		break;
+	case MPA_REQ_WAIT:
+		__state_set(&ep->com, ABORTING);
+		break;
+	case CLOSING:
+	case MORIBUND:
+		if (ep->com.cm_id && ep->com.qp) {
+			attrs.next_state = C4IW_QP_STATE_ERROR;
+			c4iw_modify_qp(ep->com.qp->rhp,
+				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+				     &attrs, 1);
+		}
+		__state_set(&ep->com, ABORTING);
+		break;
+	default:
+		printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+			__func__, ep, ep->hwtid, ep->com.state);
+		WARN_ON(1);
+		abort = 0;
+	}
+	spin_unlock_irq(&ep->com.lock);
+	if (abort)
+		abort_connection(ep, NULL, GFP_KERNEL);
+	c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+	struct c4iw_ep *ep;
+
+	spin_lock_irq(&timeout_lock);
+	while (!list_empty(&timeout_list)) {
+		struct list_head *tmp;
+
+		tmp = timeout_list.next;
+		list_del(tmp);
+		spin_unlock_irq(&timeout_lock);
+		ep = list_entry(tmp, struct c4iw_ep, entry);
+		process_timeout(ep);
+		spin_lock_irq(&timeout_lock);
+	}
+	spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+	struct sk_buff *skb = NULL;
+	struct c4iw_dev *dev;
+	struct cpl_act_establish *rpl = cplhdr(skb);
+	unsigned int opcode;
+	int ret;
+
+	while ((skb = skb_dequeue(&rxq))) {
+		rpl = cplhdr(skb);
+		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+		opcode = rpl->ot.opcode;
+
+		BUG_ON(!work_handlers[opcode]);
+		ret = work_handlers[opcode](dev, skb);
+		if (!ret)
+			kfree_skb(skb);
+	}
+	process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+	spin_lock(&timeout_lock);
+	list_add_tail(&ep->entry, &timeout_list);
+	spin_unlock(&timeout_lock);
+	queue_work(workq, &skb_work);
+}
+
+/*
  * All the CM events are handled on a work queue to have a safe context.
  */
 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
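
The hunk above is the core of the change: ep_timeout() no longer aborts the connection from timer (atomic) context. It only adds the endpoint to timeout_list under timeout_lock and queues skb_work, and process_work() then drains the list through process_timedout_eps()/process_timeout() in workqueue context, where it can take ep->com.lock with plain spin_lock_irq() and call abort_connection() with GFP_KERNEL instead of the old GFP_ATOMIC. Below is a minimal userspace sketch of the same hand-off pattern, using pthreads as a stand-in for the timer and the workqueue; every name in it is illustrative and nothing is taken from the driver.

/*
 * Userspace analog of the deferred-timeout pattern (illustrative only):
 * the "timer" side just links the item onto a locked list and wakes the
 * worker; the worker drains the list and does the heavy work where it is
 * allowed to sleep.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ep {				/* stand-in for struct c4iw_ep */
	int id;
	struct ep *next;
};

static struct ep *timeout_list;		/* LIFO list, for brevity */
static pthread_mutex_t timeout_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_pending = PTHREAD_COND_INITIALIZER;
static int stopping;

/* analog of ep_timeout(): cheap and non-blocking, queue and notify only */
static void ep_timeout(struct ep *ep)
{
	pthread_mutex_lock(&timeout_lock);
	ep->next = timeout_list;
	timeout_list = ep;
	pthread_mutex_unlock(&timeout_lock);
	pthread_cond_signal(&work_pending);
}

/* analog of process_timedout_eps(): drop the lock while handling each ep */
static void *process_work(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&timeout_lock);
	while (!stopping || timeout_list) {
		while (timeout_list) {
			struct ep *ep = timeout_list;

			timeout_list = ep->next;
			pthread_mutex_unlock(&timeout_lock);
			printf("aborting ep %d in worker context\n", ep->id);
			free(ep);
			pthread_mutex_lock(&timeout_lock);
		}
		if (!stopping)
			pthread_cond_wait(&work_pending, &timeout_lock);
	}
	pthread_mutex_unlock(&timeout_lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;
	int i;

	pthread_create(&worker, NULL, process_work, NULL);
	for (i = 0; i < 3; i++) {
		struct ep *ep = malloc(sizeof(*ep));

		ep->id = i;
		ep_timeout(ep);		/* pretend the timer fired */
	}
	pthread_mutex_lock(&timeout_lock);
	stopping = 1;
	pthread_mutex_unlock(&timeout_lock);
	pthread_cond_signal(&work_pending);
	pthread_join(worker, NULL);
	return 0;
}

As in process_timedout_eps(), the list lock is dropped while each entry is handled, so the slow work never runs with the lock held.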
@@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_fw6_msg *rpl = cplhdr(skb);
+	struct c4iw_wr_wait *wr_waitp;
+	int ret;
+
+	PDBG("%s type %u\n", __func__, rpl->type);
+
+	switch (rpl->type) {
+	case 1:
+		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+		if (wr_waitp) {
+			wr_waitp->ret = ret;
+			wr_waitp->done = 1;
+			wake_up(&wr_waitp->wait);
+		}
+		break;
+	case 2:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	default:
+		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+		       rpl->type);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+	[CPL_ACT_ESTABLISH] = sched,
+	[CPL_ACT_OPEN_RPL] = sched,
+	[CPL_RX_DATA] = sched,
+	[CPL_ABORT_RPL_RSS] = sched,
+	[CPL_ABORT_RPL] = sched,
+	[CPL_PASS_OPEN_RPL] = sched,
+	[CPL_CLOSE_LISTSRV_RPL] = sched,
+	[CPL_PASS_ACCEPT_REQ] = sched,
+	[CPL_PASS_ESTABLISH] = sched,
+	[CPL_PEER_CLOSE] = sched,
+	[CPL_CLOSE_CON_RPL] = sched,
+	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_RDMA_TERMINATE] = sched,
+	[CPL_FW4_ACK] = sched,
+	[CPL_SET_TCB_RPL] = set_tcb_rpl,
+	[CPL_FW6_MSG] = fw6_msg
+};
+
 int __init c4iw_cm_init(void)
 {
+	spin_lock_init(&timeout_lock);
 	skb_queue_head_init(&rxq);
 
 	workq = create_singlethread_workqueue("iw_cxgb4");
 	if (!workq)
 		return -ENOMEM;
 
-	/*
-	 * Most upcalls from the T4 Core go to sched() to
-	 * schedule the processing on a work queue.
-	 */
-	c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
-	c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_RX_DATA] = sched;
-	c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
-	c4iw_handlers[CPL_ABORT_RPL] = sched;
-	c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
-	c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-	c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-	c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
-	c4iw_handlers[CPL_PEER_CLOSE] = sched;
-	c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
-	c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
-	c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
-	c4iw_handlers[CPL_FW4_ACK] = sched;
-	c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-	c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
-
-	/*
-	 * These are the real handlers that are called from a
-	 * work queue.
-	 */
-	work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-	work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-	work_handlers[CPL_RX_DATA] = rx_data;
-	work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-	work_handlers[CPL_ABORT_RPL] = abort_rpl;
-	work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-	work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-	work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-	work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-	work_handlers[CPL_PEER_CLOSE] = peer_close;
-	work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-	work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-	work_handlers[CPL_RDMA_TERMINATE] = terminate;
-	work_handlers[CPL_FW4_ACK] = fw4_ack;
 	return 0;
 }
 
 void __exit c4iw_cm_term(void)
 {
+	WARN_ON(!list_empty(&timeout_list));
 	flush_workqueue(workq);
 	destroy_workqueue(workq);
 }
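
Both CPL dispatch tables are now populated with designated initializers at compile time rather than being filled in one entry at a time inside c4iw_cm_init(); unlisted opcodes stay NULL, and process_work() still BUG()s if a queued opcode has no work_handlers entry. A small self-contained sketch of that dispatch-table pattern, with made-up opcodes and handlers purely for illustration:

#include <stdio.h>

enum { CPL_FOO, CPL_BAR, NUM_CMDS };	/* illustrative opcodes */

typedef int (*handler_func)(int opcode);

static int handle_foo(int op) { printf("foo %d\n", op); return 0; }
static int handle_bar(int op) { printf("bar %d\n", op); return 0; }

/* designated initializers: opcodes not listed stay NULL */
static handler_func handlers[NUM_CMDS] = {
	[CPL_FOO] = handle_foo,
	[CPL_BAR] = handle_bar,
};

static int dispatch(int opcode)
{
	if (opcode >= NUM_CMDS || !handlers[opcode])
		return -1;		/* the driver BUG()s here instead */
	return handlers[opcode](opcode);
}

int main(void)
{
	return dispatch(CPL_BAR);
}

Keeping the tables as static initializers also means the exported c4iw_handlers table is fully defined before c4iw_cm_init() ever runs, instead of depending on init ordering.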