author    Roland Dreier <rolandd@cisco.com>    2010-05-05 17:45:40 -0400
committer Roland Dreier <rolandd@cisco.com>    2010-05-05 17:45:40 -0400
commit    be4c9bad9d0edb6bc3bd8fffc2f98e0e2112da39 (patch)
tree      1fbe204cb8f386e35581bd9fa8ea835950b076c4 /drivers/infiniband
parent    cfdda9d764362ab77b11a410bb928400e6520d57 (diff)
MAINTAINERS: Add cxgb4 and iw_cxgb4 entries
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--   drivers/infiniband/hw/cxgb4/cm.c        | 316
-rw-r--r--   drivers/infiniband/hw/cxgb4/ev.c        |   6
-rw-r--r--   drivers/infiniband/hw/cxgb4/iw_cxgb4.h  |   2
-rw-r--r--   drivers/infiniband/hw/cxgb4/provider.c  |   4
-rw-r--r--   drivers/infiniband/hw/cxgb4/qp.c        |  28
-rw-r--r--   drivers/infiniband/hw/cxgb4/t4.h        |  32
6 files changed, 224 insertions(+), 164 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 07b068be0cfa..30ce0a8eca09 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,6 +61,10 @@ static char *states[] = {
         NULL,
 };
 
+int c4iw_max_read_depth = 8;
+module_param(c4iw_max_read_depth, int, 0644);
+MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
@@ -113,18 +117,17 @@ static int snd_win = 32 * 1024;
 module_param(snd_win, int, 0644);
 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 
-static void process_work(struct work_struct *work);
 static struct workqueue_struct *workq;
-static DECLARE_WORK(skb_work, process_work);
 
 static struct sk_buff_head rxq;
-static c4iw_handler_func work_handlers[NUM_CPL_CMDS];
-c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
 
 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 static void ep_timeout(unsigned long arg);
 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 
+static LIST_HEAD(timeout_list);
+static spinlock_t timeout_lock;
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
         PDBG("%s ep %p\n", __func__, ep);
@@ -271,26 +274,6 @@ static void release_ep_resources(struct c4iw_ep *ep)
         c4iw_put_ep(&ep->com);
 }
 
-static void process_work(struct work_struct *work)
-{
-        struct sk_buff *skb = NULL;
-        struct c4iw_dev *dev;
-        struct cpl_act_establish *rpl = cplhdr(skb);
-        unsigned int opcode;
-        int ret;
-
-        while ((skb = skb_dequeue(&rxq))) {
-                rpl = cplhdr(skb);
-                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
-                opcode = rpl->ot.opcode;
-
-                BUG_ON(!work_handlers[opcode]);
-                ret = work_handlers[opcode](dev, skb);
-                if (!ret)
-                        kfree_skb(skb);
-        }
-}
-
 static int status2errno(int status)
 {
         switch (status) {
@@ -1795,76 +1778,6 @@ static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
         return 0;
 }
 
-static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
-{
-        struct cpl_fw6_msg *rpl = cplhdr(skb);
-        struct c4iw_wr_wait *wr_waitp;
-        int ret;
-
-        PDBG("%s type %u\n", __func__, rpl->type);
-
-        switch (rpl->type) {
-        case 1:
-                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
-                wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
-                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-                if (wr_waitp) {
-                        wr_waitp->ret = ret;
-                        wr_waitp->done = 1;
-                        wake_up(&wr_waitp->wait);
-                }
-                break;
-        case 2:
-                c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
-                break;
-        default:
-                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
-                       rpl->type);
-                break;
-        }
-        return 0;
-}
-
-static void ep_timeout(unsigned long arg)
-{
-        struct c4iw_ep *ep = (struct c4iw_ep *)arg;
-        struct c4iw_qp_attributes attrs;
-        unsigned long flags;
-        int abort = 1;
-
-        spin_lock_irqsave(&ep->com.lock, flags);
-        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
-             ep->com.state);
-        switch (ep->com.state) {
-        case MPA_REQ_SENT:
-                __state_set(&ep->com, ABORTING);
-                connect_reply_upcall(ep, -ETIMEDOUT);
-                break;
-        case MPA_REQ_WAIT:
-                __state_set(&ep->com, ABORTING);
-                break;
-        case CLOSING:
-        case MORIBUND:
-                if (ep->com.cm_id && ep->com.qp) {
-                        attrs.next_state = C4IW_QP_STATE_ERROR;
-                        c4iw_modify_qp(ep->com.qp->rhp,
-                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
-                                     &attrs, 1);
-                }
-                __state_set(&ep->com, ABORTING);
-                break;
-        default:
-                printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
-                       __func__, ep, ep->hwtid, ep->com.state);
-                WARN_ON(1);
-                abort = 0;
-        }
-        spin_unlock_irqrestore(&ep->com.lock, flags);
-        if (abort)
-                abort_connection(ep, NULL, GFP_ATOMIC);
-        c4iw_put_ep(&ep->com);
-}
-
 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 {
         int err;
@@ -1904,8 +1817,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
         BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
         BUG_ON(!qp);
 
-        if ((conn_param->ord > T4_MAX_READ_DEPTH) ||
-            (conn_param->ird > T4_MAX_READ_DEPTH)) {
+        if ((conn_param->ord > c4iw_max_read_depth) ||
+            (conn_param->ird > c4iw_max_read_depth)) {
                 abort_connection(ep, NULL, GFP_KERNEL);
                 err = -EINVAL;
                 goto err;
@@ -1968,6 +1881,11 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
         struct net_device *pdev;
         int step;
 
+        if ((conn_param->ord > c4iw_max_read_depth) ||
+            (conn_param->ird > c4iw_max_read_depth)) {
+                err = -EINVAL;
+                goto out;
+        }
         ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
         if (!ep) {
                 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
@@ -2115,7 +2033,7 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
          */
         ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
         if (ep->stid == -1) {
-                printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
+                printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
                 err = -ENOMEM;
                 goto fail2;
         }
@@ -2244,6 +2162,116 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 }
 
 /*
+ * These are the real handlers that are called from a
+ * work queue.
+ */
+static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
+        [CPL_ACT_ESTABLISH] = act_establish,
+        [CPL_ACT_OPEN_RPL] = act_open_rpl,
+        [CPL_RX_DATA] = rx_data,
+        [CPL_ABORT_RPL_RSS] = abort_rpl,
+        [CPL_ABORT_RPL] = abort_rpl,
+        [CPL_PASS_OPEN_RPL] = pass_open_rpl,
+        [CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
+        [CPL_PASS_ACCEPT_REQ] = pass_accept_req,
+        [CPL_PASS_ESTABLISH] = pass_establish,
+        [CPL_PEER_CLOSE] = peer_close,
+        [CPL_ABORT_REQ_RSS] = peer_abort,
+        [CPL_CLOSE_CON_RPL] = close_con_rpl,
+        [CPL_RDMA_TERMINATE] = terminate,
+        [CPL_FW4_ACK] = fw4_ack
+};
+
+static void process_timeout(struct c4iw_ep *ep)
+{
+        struct c4iw_qp_attributes attrs;
+        int abort = 1;
+
+        spin_lock_irq(&ep->com.lock);
+        PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
+             ep->com.state);
+        switch (ep->com.state) {
+        case MPA_REQ_SENT:
+                __state_set(&ep->com, ABORTING);
+                connect_reply_upcall(ep, -ETIMEDOUT);
+                break;
+        case MPA_REQ_WAIT:
+                __state_set(&ep->com, ABORTING);
+                break;
+        case CLOSING:
+        case MORIBUND:
+                if (ep->com.cm_id && ep->com.qp) {
+                        attrs.next_state = C4IW_QP_STATE_ERROR;
+                        c4iw_modify_qp(ep->com.qp->rhp,
+                                     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
+                                     &attrs, 1);
+                }
+                __state_set(&ep->com, ABORTING);
+                break;
+        default:
+                printk(KERN_ERR "%s unexpected state ep %p tid %u state %u\n",
+                       __func__, ep, ep->hwtid, ep->com.state);
+                WARN_ON(1);
+                abort = 0;
+        }
+        spin_unlock_irq(&ep->com.lock);
+        if (abort)
+                abort_connection(ep, NULL, GFP_KERNEL);
+        c4iw_put_ep(&ep->com);
+}
+
+static void process_timedout_eps(void)
+{
+        struct c4iw_ep *ep;
+
+        spin_lock_irq(&timeout_lock);
+        while (!list_empty(&timeout_list)) {
+                struct list_head *tmp;
+
+                tmp = timeout_list.next;
+                list_del(tmp);
+                spin_unlock_irq(&timeout_lock);
+                ep = list_entry(tmp, struct c4iw_ep, entry);
+                process_timeout(ep);
+                spin_lock_irq(&timeout_lock);
+        }
+        spin_unlock_irq(&timeout_lock);
+}
+
+static void process_work(struct work_struct *work)
+{
+        struct sk_buff *skb = NULL;
+        struct c4iw_dev *dev;
+        struct cpl_act_establish *rpl = cplhdr(skb);
+        unsigned int opcode;
+        int ret;
+
+        while ((skb = skb_dequeue(&rxq))) {
+                rpl = cplhdr(skb);
+                dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
+                opcode = rpl->ot.opcode;
+
+                BUG_ON(!work_handlers[opcode]);
+                ret = work_handlers[opcode](dev, skb);
+                if (!ret)
+                        kfree_skb(skb);
+        }
+        process_timedout_eps();
+}
+
+static DECLARE_WORK(skb_work, process_work);
+
+static void ep_timeout(unsigned long arg)
+{
+        struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+
+        spin_lock(&timeout_lock);
+        list_add_tail(&ep->entry, &timeout_list);
+        spin_unlock(&timeout_lock);
+        queue_work(workq, &skb_work);
+}
+
+/*
  * All the CM events are handled on a work queue to have a safe context.
  */
 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
@@ -2273,58 +2301,74 @@ static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
         return 0;
 }
 
+static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+        struct cpl_fw6_msg *rpl = cplhdr(skb);
+        struct c4iw_wr_wait *wr_waitp;
+        int ret;
+
+        PDBG("%s type %u\n", __func__, rpl->type);
+
+        switch (rpl->type) {
+        case 1:
+                ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
+                wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
+                PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
+                if (wr_waitp) {
+                        wr_waitp->ret = ret;
+                        wr_waitp->done = 1;
+                        wake_up(&wr_waitp->wait);
+                }
+                break;
+        case 2:
+                c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+                break;
+        default:
+                printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
+                       rpl->type);
+                break;
+        }
+        return 0;
+}
+
+/*
+ * Most upcalls from the T4 Core go to sched() to
+ * schedule the processing on a work queue.
+ */
+c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
+        [CPL_ACT_ESTABLISH] = sched,
+        [CPL_ACT_OPEN_RPL] = sched,
+        [CPL_RX_DATA] = sched,
+        [CPL_ABORT_RPL_RSS] = sched,
+        [CPL_ABORT_RPL] = sched,
+        [CPL_PASS_OPEN_RPL] = sched,
+        [CPL_CLOSE_LISTSRV_RPL] = sched,
+        [CPL_PASS_ACCEPT_REQ] = sched,
+        [CPL_PASS_ESTABLISH] = sched,
+        [CPL_PEER_CLOSE] = sched,
+        [CPL_CLOSE_CON_RPL] = sched,
+        [CPL_ABORT_REQ_RSS] = sched,
+        [CPL_RDMA_TERMINATE] = sched,
+        [CPL_FW4_ACK] = sched,
+        [CPL_SET_TCB_RPL] = set_tcb_rpl,
+        [CPL_FW6_MSG] = fw6_msg
+};
+
 int __init c4iw_cm_init(void)
 {
+        spin_lock_init(&timeout_lock);
         skb_queue_head_init(&rxq);
 
         workq = create_singlethread_workqueue("iw_cxgb4");
         if (!workq)
                 return -ENOMEM;
 
-        /*
-         * Most upcalls from the T4 Core go to sched() to
-         * schedule the processing on a work queue.
-         */
-        c4iw_handlers[CPL_ACT_ESTABLISH] = sched;
-        c4iw_handlers[CPL_ACT_OPEN_RPL] = sched;
-        c4iw_handlers[CPL_RX_DATA] = sched;
-        c4iw_handlers[CPL_ABORT_RPL_RSS] = sched;
-        c4iw_handlers[CPL_ABORT_RPL] = sched;
-        c4iw_handlers[CPL_PASS_OPEN_RPL] = sched;
-        c4iw_handlers[CPL_CLOSE_LISTSRV_RPL] = sched;
-        c4iw_handlers[CPL_PASS_ACCEPT_REQ] = sched;
-        c4iw_handlers[CPL_PASS_ESTABLISH] = sched;
-        c4iw_handlers[CPL_PEER_CLOSE] = sched;
-        c4iw_handlers[CPL_CLOSE_CON_RPL] = sched;
-        c4iw_handlers[CPL_ABORT_REQ_RSS] = sched;
-        c4iw_handlers[CPL_RDMA_TERMINATE] = sched;
-        c4iw_handlers[CPL_FW4_ACK] = sched;
-        c4iw_handlers[CPL_SET_TCB_RPL] = set_tcb_rpl;
-        c4iw_handlers[CPL_FW6_MSG] = fw6_msg;
-
-        /*
-         * These are the real handlers that are called from a
-         * work queue.
-         */
-        work_handlers[CPL_ACT_ESTABLISH] = act_establish;
-        work_handlers[CPL_ACT_OPEN_RPL] = act_open_rpl;
-        work_handlers[CPL_RX_DATA] = rx_data;
-        work_handlers[CPL_ABORT_RPL_RSS] = abort_rpl;
-        work_handlers[CPL_ABORT_RPL] = abort_rpl;
-        work_handlers[CPL_PASS_OPEN_RPL] = pass_open_rpl;
-        work_handlers[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl;
-        work_handlers[CPL_PASS_ACCEPT_REQ] = pass_accept_req;
-        work_handlers[CPL_PASS_ESTABLISH] = pass_establish;
-        work_handlers[CPL_PEER_CLOSE] = peer_close;
-        work_handlers[CPL_ABORT_REQ_RSS] = peer_abort;
-        work_handlers[CPL_CLOSE_CON_RPL] = close_con_rpl;
-        work_handlers[CPL_RDMA_TERMINATE] = terminate;
-        work_handlers[CPL_FW4_ACK] = fw4_ack;
         return 0;
 }
 
 void __exit c4iw_cm_term(void)
 {
+        WARN_ON(!list_empty(&timeout_list));
         flush_workqueue(workq);
         destroy_workqueue(workq);
 }
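
The cm.c changes above replace the old ep_timeout(), which called c4iw_modify_qp() and abort_connection() directly from the timer callback, with a two-stage scheme: the timer only links the endpoint onto timeout_list and kicks the single-threaded workqueue, and process_timedout_eps() then handles each endpoint in process context. The following is a minimal, driver-independent sketch of that pattern; names such as my_ep and my_timeout are placeholders, not iw_cxgb4 symbols.

/*
 * Timer callback runs in atomic context, so it only queues the object;
 * the work function drains the list in process context, where it may
 * sleep, take blocking locks and use GFP_KERNEL.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct my_ep {
        struct list_head entry;
        struct timer_list timer;        /* armed elsewhere, e.g. with
                                         * setup_timer(&ep->timer, my_timeout,
                                         * (unsigned long)ep) */
};

static LIST_HEAD(my_timeout_list);
static DEFINE_SPINLOCK(my_timeout_lock);
static struct workqueue_struct *my_workq;       /* created at module init */

static void my_process_work(struct work_struct *work);
static DECLARE_WORK(my_timeout_work, my_process_work);

/* Timer callback: atomic context, defer everything heavy. */
static void my_timeout(unsigned long arg)
{
        struct my_ep *ep = (struct my_ep *)arg;

        spin_lock(&my_timeout_lock);
        list_add_tail(&ep->entry, &my_timeout_list);
        spin_unlock(&my_timeout_lock);
        queue_work(my_workq, &my_timeout_work);
}

/* Work function: process context; drop the lock while handling each entry. */
static void my_process_work(struct work_struct *work)
{
        struct my_ep *ep;

        spin_lock_irq(&my_timeout_lock);
        while (!list_empty(&my_timeout_list)) {
                ep = list_first_entry(&my_timeout_list, struct my_ep, entry);
                list_del(&ep->entry);
                spin_unlock_irq(&my_timeout_lock);
                pr_info("handling timed-out ep %p\n", ep);  /* real work here */
                spin_lock_irq(&my_timeout_lock);
        }
        spin_unlock_irq(&my_timeout_lock);
}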
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 1bd6a3e531af..491e76a0327f 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -51,8 +51,8 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
                 return;
         }
 
-        printk(KERN_ERR "%s - AE qpid 0x%x opcode %d status 0x%x "
-               "type %d wrid.hi 0x%x wrid.lo 0x%x\n", __func__,
+        printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
+               "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
                CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
                CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
                CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
@@ -60,7 +60,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
         if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                 attrs.next_state = C4IW_QP_STATE_TERMINATE;
                 c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
-                               &attrs, 0);
+                               &attrs, 1);
         }
 
         event.event = ib_event;
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index ccce6fe75701..a6269981e815 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -597,6 +597,7 @@ struct c4iw_ep {
         struct c4iw_ep_common com;
         struct c4iw_ep *parent_ep;
         struct timer_list timer;
+        struct list_head entry;
         unsigned int atid;
         u32 hwtid;
         u32 snd_seq;
@@ -739,5 +740,6 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
+extern int c4iw_max_read_depth;
 
 #endif
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 3cb50af3e52a..dfc49020bb9c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -267,8 +267,8 @@ static int c4iw_query_device(struct ib_device *ibdev,
         props->max_qp_wr = T4_MAX_QP_DEPTH;
         props->max_sge = T4_MAX_RECV_SGE;
         props->max_sge_rd = 1;
-        props->max_qp_rd_atom = T4_MAX_READ_DEPTH;
-        props->max_qp_init_rd_atom = T4_MAX_READ_DEPTH;
+        props->max_qp_rd_atom = c4iw_max_read_depth;
+        props->max_qp_init_rd_atom = c4iw_max_read_depth;
         props->max_cq = T4_MAX_NUM_CQ;
         props->max_cqe = T4_MAX_CQ_DEPTH;
         props->max_mr = c4iw_num_stags(&dev->rdev);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bd56c841ef75..83a01dc0c4c1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -856,7 +856,8 @@ int c4iw_post_zb_read(struct c4iw_qp *qhp)
         return c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+                           gfp_t gfp)
 {
         struct fw_ri_wr *wqe;
         struct sk_buff *skb;
@@ -865,9 +866,9 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
         PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
              qhp->ep->hwtid);
 
-        skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+        skb = alloc_skb(sizeof *wqe, gfp);
         if (!skb)
-                return -ENOMEM;
+                return;
         set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
         wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -881,7 +882,7 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
         wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
         term = (struct terminate_message *)wqe->u.terminate.termmsg;
         build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
-        return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+        c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
 /*
@@ -1130,14 +1131,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
         if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
                 newattr.enable_bind = attrs->enable_bind;
         if (mask & C4IW_QP_ATTR_MAX_ORD) {
-                if (attrs->max_ord > T4_MAX_READ_DEPTH) {
+                if (attrs->max_ord > c4iw_max_read_depth) {
                         ret = -EINVAL;
                         goto out;
                 }
                 newattr.max_ord = attrs->max_ord;
         }
         if (mask & C4IW_QP_ATTR_MAX_IRD) {
-                if (attrs->max_ird > T4_MAX_READ_DEPTH) {
+                if (attrs->max_ird > c4iw_max_read_depth) {
                         ret = -EINVAL;
                         goto out;
                 }
@@ -1215,12 +1216,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                         qhp->attr.state = C4IW_QP_STATE_TERMINATE;
                         if (qhp->ibqp.uobject)
                                 t4_set_wq_in_error(&qhp->wq);
-                        if (!internal) {
-                                ep = qhp->ep;
-                                c4iw_get_ep(&ep->com);
-                                terminate = 1;
-                                disconnect = 1;
-                        }
+                        ep = qhp->ep;
+                        c4iw_get_ep(&ep->com);
+                        terminate = 1;
+                        disconnect = 1;
                         break;
                 case C4IW_QP_STATE_ERROR:
                         qhp->attr.state = C4IW_QP_STATE_ERROR;
@@ -1301,7 +1300,7 @@ out:
         spin_unlock_irqrestore(&qhp->lock, flag);
 
         if (terminate)
-                c4iw_post_terminate(qhp, NULL);
+                post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
 
         /*
          * If disconnect is 1, then we need to initiate a disconnect
@@ -1309,7 +1308,8 @@ out:
          * an abnormal close (RTS/CLOSING->ERROR).
          */
         if (disconnect) {
-                c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
+                c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+                                   GFP_KERNEL);
                 c4iw_put_ep(&ep->com);
         }
 
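
The qp.c changes make the terminate and disconnect paths callable from atomic context by threading allocation flags through instead of hard-coding GFP_KERNEL (and by dropping __GFP_NOFAIL in favor of a best-effort allocation). A small sketch of the idiom follows; send_small_msg is a hypothetical helper, not part of the driver.

#include <linux/gfp.h>
#include <linux/skbuff.h>

/* A helper reachable from both sleeping and atomic paths takes gfp_t
 * from its caller rather than choosing the flags itself. */
static void send_small_msg(unsigned int len, gfp_t gfp)
{
        struct sk_buff *skb = alloc_skb(len, gfp);

        if (!skb)
                return;                 /* best effort, no __GFP_NOFAIL */
        skb_put(skb, len);              /* ... fill in the message ... */
        kfree_skb(skb);                 /* stand-in for the real transmit */
}

/* Callers pick the flags based on their own context, just as
 * c4iw_modify_qp() now does with "internal ? GFP_ATOMIC : GFP_KERNEL". */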
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 3f0d2172efda..d0e8af352408 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -36,7 +36,6 @@
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_READ_DEPTH 16
 #define T4_QID_BASE 1024
 #define T4_MAX_QIDS 256
 #define T4_MAX_NUM_QP (1<<16)
@@ -450,11 +449,25 @@ struct t4_cq {
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
         u32 val;
-
-        val = SEINTARM(se) | CIDXINC(cq->cidx_inc) | TIMERREG(6) |
-              INGRESSQID(cq->cqid);
-        cq->cidx_inc = 0;
-        writel(val, cq->gts);
+        u16 inc;
+
+        do {
+                /*
+                 * inc must be less the both the max update value -and-
+                 * the size of the CQ.
+                 */
+                inc = cq->cidx_inc <= CIDXINC_MASK ? cq->cidx_inc :
+                      CIDXINC_MASK;
+                inc = inc <= (cq->size - 1) ? inc : (cq->size - 1);
+                if (inc == cq->cidx_inc)
+                        val = SEINTARM(se) | CIDXINC(inc) | TIMERREG(6) |
+                              INGRESSQID(cq->cqid);
+                else
+                        val = SEINTARM(0) | CIDXINC(inc) | TIMERREG(7) |
+                              INGRESSQID(cq->cqid);
+                cq->cidx_inc -= inc;
+                writel(val, cq->gts);
+        } while (cq->cidx_inc);
         return 0;
 }
 
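
The loop in the new t4_arm_cq() exists because the CIDXINC field of the GTS doorbell is narrower than the index count that can accumulate: the pending cq->cidx_inc must be reported in chunks no larger than CIDXINC_MASK (and no larger than the CQ itself), and only the final write, when the remainder fits in one chunk, arms the CQ with the caller's se/timer value; the earlier writes only consume index credits (SEINTARM(0), timer 7). A standalone illustration of the arithmetic, with assumed values for the mask and CQ size:

#include <stdio.h>

int main(void)
{
        unsigned int cidx_inc = 3000;      /* accumulated, unreported CQEs */
        const unsigned int mask = 2047;    /* assumed CIDXINC field limit  */
        const unsigned int size = 1024;    /* assumed CQ size              */
        unsigned int inc;

        do {
                inc = cidx_inc <= mask ? cidx_inc : mask;
                inc = inc <= size - 1 ? inc : size - 1;
                printf("write CIDXINC=%u %s\n", inc,
                       inc == cidx_inc ? "(final write: arm CQ, timer 6)"
                                       : "(partial write: no arm, timer 7)");
                cidx_inc -= inc;
        } while (cidx_inc);
        return 0;
}

With these assumed numbers the loop issues two non-arming writes of 1023 followed by a final arming write of 954, which together report all 3000 entries.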
@@ -489,11 +502,12 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
         int ret = 0;
+        u64 bits_type_ts = be64_to_cpu(cq->queue[cq->cidx].bits_type_ts);
 
-        if (t4_valid_cqe(cq, &cq->queue[cq->cidx])) {
+        if (G_CQE_GENBIT(bits_type_ts) == cq->gen) {
                 *cqe = &cq->queue[cq->cidx];
-                cq->timestamp = CQE_TS(*cqe);
-        } else if (CQE_TS(&cq->queue[cq->cidx]) > cq->timestamp)
+                cq->timestamp = G_CQE_TS(bits_type_ts);
+        } else if (G_CQE_TS(bits_type_ts) > cq->timestamp)
                 ret = -EOVERFLOW;
         else
                 ret = -ENODATA;