Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cm.c               | 20
-rw-r--r--  drivers/infiniband/core/cma.c              | 58
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c           |  6
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c       | 24
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h     |  1
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c           |  4
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h           |  8
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_sysfs.c  |  1
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c    | 13
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c        | 12
-rw-r--r--  drivers/infiniband/hw/qib/qib_qsfp.h       |  2
11 files changed, 101 insertions(+), 48 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 64e0903091a8..f804e28e1ebb 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1988,6 +1988,10 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
+	if (cm_id->lap_state == IB_CM_LAP_SENT ||
+	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret) {
 		cm_enter_timewait(cm_id_priv);
@@ -2129,6 +2133,10 @@ static int cm_dreq_handler(struct cm_work *work)
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		break;
 	case IB_CM_ESTABLISHED:
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+		break;
 	case IB_CM_MRA_REP_RCVD:
 		break;
 	case IB_CM_TIMEWAIT:
@@ -2349,9 +2357,18 @@ static int cm_rej_handler(struct cm_work *work)
 		/* fall through */
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
-	case IB_CM_ESTABLISHED:
 		cm_enter_timewait(cm_id_priv);
 		break;
+	case IB_CM_ESTABLISHED:
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
+		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
+			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
+				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
+					      cm_id_priv->msg);
+			cm_enter_timewait(cm_id_priv);
+			break;
+		}
+		/* fall through */
 	default:
 		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
@@ -2989,6 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		goto out; /* No match. */
 	}
 	atomic_inc(&cur_cm_id_priv->refcount);
+	atomic_inc(&cm_id_priv->refcount);
 	spin_unlock_irq(&cm.lock);
 
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
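
The two LAP hunks above share one idea: a connection leaving IB_CM_ESTABLISHED may still have a LAP (load alternate path) MAD awaiting a response, and its retransmit timer must be stopped before the state changes. Below is a minimal user-space sketch of that pattern; all names here (struct conn, cancel_outstanding_msg) are illustrative stand-ins, not the kernel's API.

	#include <stdbool.h>
	#include <stdio.h>

	/* Connection states loosely mirroring the CM's LAP sub-states. */
	enum lap_state { LAP_UNINIT, LAP_SENT, MRA_LAP_RCVD, LAP_IDLE };

	struct conn {
		enum lap_state lap_state;
		bool msg_outstanding;
	};

	/* Stand-in for ib_cancel_mad(): stop retransmits of an in-flight MAD. */
	static void cancel_outstanding_msg(struct conn *c)
	{
		c->msg_outstanding = false;
		printf("cancelled pending LAP message\n");
	}

	/* Mirrors the ib_send_cm_dreq() hunk: a DREQ leaves the established
	 * state, so a LAP still awaiting a reply must be cancelled first or
	 * its retry timer would later fire against torn-down state. */
	static void send_dreq(struct conn *c)
	{
		if (c->lap_state == LAP_SENT || c->lap_state == MRA_LAP_RCVD)
			cancel_outstanding_msg(c);
		/* ... allocate and post the DREQ itself ... */
	}

	int main(void)
	{
		struct conn c = { .lap_state = LAP_SENT, .msg_outstanding = true };
		send_dreq(&c);
		return 0;
	}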
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6884da24fde1..5ed9d25d021a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -308,11 +308,13 @@ static inline void release_mc(struct kref *kref)
 	kfree(mc);
 }
 
-static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+static void cma_release_dev(struct rdma_id_private *id_priv)
 {
+	mutex_lock(&lock);
 	list_del(&id_priv->list);
 	cma_deref_dev(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
+	mutex_unlock(&lock);
 }
 
 static int cma_set_qkey(struct rdma_id_private *id_priv)
@@ -373,6 +375,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
+	mutex_lock(&lock);
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
@@ -398,6 +401,7 @@ out:
 	if (!ret)
 		cma_attach_to_dev(id_priv, cma_dev);
 
+	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -904,9 +908,14 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	state = cma_exch(id_priv, CMA_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
-	mutex_lock(&lock);
+	/*
+	 * Wait for any active callback to finish.  New callbacks will find
+	 * the id_priv state set to destroying and abort.
+	 */
+	mutex_lock(&id_priv->handler_mutex);
+	mutex_unlock(&id_priv->handler_mutex);
+
 	if (id_priv->cma_dev) {
-		mutex_unlock(&lock);
 		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -920,10 +929,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 			break;
 		}
 		cma_leave_mc_groups(id_priv);
-		mutex_lock(&lock);
-		cma_detach_from_dev(id_priv);
+		cma_release_dev(id_priv);
 	}
-	mutex_unlock(&lock);
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
@@ -1200,9 +1207,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
-	mutex_unlock(&lock);
 	if (ret)
 		goto release_conn_id;
 
@@ -1210,6 +1215,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_ib_handler;
 
+	/*
+	 * Protect against the user destroying conn_id from another thread
+	 * until we're done accessing it.
+	 */
+	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret) {
 		/*
@@ -1222,8 +1232,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
 		mutex_unlock(&conn_id->handler_mutex);
+		cma_deref_id(conn_id);
 		goto out;
 	}
+	cma_deref_id(conn_id);
 
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
@@ -1394,9 +1406,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
-	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
-	mutex_unlock(&lock);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1425,17 +1435,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.param.conn.private_data_len = iw_event->private_data_len;
 	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
 	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+
+	/*
+	 * Protect against the user destroying conn_id from another thread
+	 * until we're done accessing it.
+	 */
+	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
+		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
 		goto out;
 	}
 
 	mutex_unlock(&conn_id->handler_mutex);
+	cma_deref_id(conn_id);
 
 out:
 	if (dev)
@@ -1951,20 +1969,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-
-	/*
-	 * Grab mutex to block rdma_destroy_id() from removing the device while
-	 * we're trying to acquire it.
-	 */
-	mutex_lock(&lock);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
-		mutex_unlock(&lock);
+	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
 		goto out;
-	}
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
-	mutex_unlock(&lock);
 
 	if (status) {
 		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
@@ -2265,9 +2274,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (ret)
 			goto err1;
 
-		mutex_lock(&lock);
 		ret = cma_acquire_dev(id_priv);
-		mutex_unlock(&lock);
 		if (ret)
 			goto err1;
 	}
@@ -2279,11 +2286,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	return 0;
 err2:
-	if (id_priv->cma_dev) {
-		mutex_lock(&lock);
-		cma_detach_from_dev(id_priv);
-		mutex_unlock(&lock);
-	}
+	if (id_priv->cma_dev)
+		cma_release_dev(id_priv);
 err1:
 	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
 	return ret;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 97a876a0f20b..9d8dcfab2b38 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,9 +61,9 @@ static char *states[] = {
 	NULL,
 };
 
-static int dack_mode;
+static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
@@ -470,6 +470,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -1262,6 +1263,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
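
Both this file and qp.c below flip a module-parameter default from 0 to 1. The default is simply the variable's initializer, so the initializer and the MODULE_PARM_DESC text must be changed together, exactly as the dack_mode and ocqp_support hunks do. A minimal sketch of the idiom as used here:

	#include <linux/module.h>

	/* The initializer is the default; keep the description in sync. */
	static int dack_mode = 1;
	module_param(dack_mode, int, 0644);	/* world-readable, root-writable */
	MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");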
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 54fbc1118abe..e29172c2afcb 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -87,17 +87,22 @@ static int dump_qp(int id, void *p, void *data)
 		return 1;
 
 	if (qp->ep)
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u "
 			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state,
+			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP,
 			      qp->ep->hwtid, (int)qp->ep->com.state,
 			      &qp->ep->com.local_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.local_addr.sin_port),
 			      &qp->ep->com.remote_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.remote_addr.sin_port));
 	else
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state);
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u\n",
+			      qp->wq.sq.qid, qp->wq.rq.qid,
+			      (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP);
 	if (cc < space)
 		qpd->pos += cc;
 	return 0;
@@ -368,7 +373,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_remove(struct c4iw_dev *dev)
 {
 	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	cancel_delayed_work_sync(&dev->db_drop_task);
 	list_del(&dev->entry);
 	if (dev->registered)
 		c4iw_unregister_device(dev);
@@ -523,8 +527,16 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
 		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
+		dev->rdev.flags |= T4_FATAL_ERROR;
+		if (dev->registered) {
+			struct ib_event event;
+
+			memset(&event, 0, sizeof event);
+			event.event = IB_EVENT_DEVICE_FATAL;
+			event.device = &dev->ibdev;
+			ib_dispatch_event(&event);
 			c4iw_unregister_device(dev);
+		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
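
The recovery hunk above notifies consumers before tearing the device down: ib_dispatch_event() fans an ib_event out to everyone registered with ib_register_event_handler(), so ULPs can stop posting work before the device disappears. A hedged sketch of just that notification step; the helper name notify_fatal and its caller structure are illustrative, not the driver's API.

	#include <rdma/ib_verbs.h>

	/* Tell registered consumers the hardware is gone, then the caller
	 * proceeds to unregister the device (as the hunk above does). */
	static void notify_fatal(struct ib_device *ibdev)
	{
		struct ib_event event;

		memset(&event, 0, sizeof event);
		event.event = IB_EVENT_DEVICE_FATAL;
		event.device = ibdev;
		ib_dispatch_event(&event);	/* fan out to event handlers */
	}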
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 2fe19ec9ba60..9f6166f59268 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -176,7 +176,6 @@ struct c4iw_dev {
 	struct idr mmidr;
 	spinlock_t lock;
 	struct list_head entry;
-	struct delayed_work db_drop_task;
 	struct dentry *debugfs_root;
 	u8 registered;
 };
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 4f0be25cab1a..70a5a3c646da 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,9 +31,9 @@
  */
 #include "iw_cxgb4.h"
 
-static int ocqp_support;
+static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
-MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 70004425d695..24af12fc8228 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -507,8 +507,14 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == cq->size)
+	if (++cq->cidx_inc == (cq->size >> 4)) {
+		u32 val;
+
+		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
+		writel(val, cq->gts);
 		cq->cidx_inc = 0;
+	}
 	if (++cq->cidx == cq->size) {
 		cq->cidx = 0;
 		cq->gen ^= 1;
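
The t4_hwcq_consume() change coalesces consumer-index updates: rather than informing the hardware only when cidx_inc wraps the whole queue, it writes the GTS doorbell every size/16 entries, so the chip's free-slot accounting never lags a full queue behind. A user-space sketch of the batching logic, with ring_doorbell() standing in for the writel() to cq->gts:

	#include <stdint.h>
	#include <stdio.h>

	struct cq {
		uint32_t size;		/* number of entries */
		uint32_t cidx_inc;	/* consumed entries not yet reported */
	};

	static void ring_doorbell(uint32_t cidxinc)
	{
		printf("report %u consumed entries to hw\n", cidxinc);
	}

	static void hwcq_consume(struct cq *cq)
	{
		if (++cq->cidx_inc == (cq->size >> 4)) {  /* every size/16 CQEs */
			ring_doorbell(cq->cidx_inc);
			cq->cidx_inc = 0;
		}
	}

	int main(void)
	{
		struct cq cq = { .size = 64 };

		for (int i = 0; i < 10; i++)
			hwcq_consume(&cq);  /* rings twice: 64/16 = 4, at 4 and 8 */
		return 0;
	}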
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b8cb2f145ae4..8991677e9a08 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -557,6 +557,7 @@ static ssize_t store_reset(struct device *dev,
 		dev_info(dev,"Unit %d is disabled, can't reset\n",
 			 dd->ipath_unit);
 		ret = -EINVAL;
+		goto bail;
 	}
 	ret = ipath_reset_device(dd->ipath_unit);
 bail:
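
The one-line ipath fix closes a classic fall-through bug: the error branch set ret = -EINVAL but, lacking a goto, fell into ipath_reset_device() anyway, so a reset was attempted on a disabled unit and the error code was silently overwritten. An illustrative user-space rendition, with do_reset() standing in for ipath_reset_device():

	#include <stdio.h>

	static int do_reset(void) { return 0; }

	static int store_reset(int disabled)
	{
		int ret;

		if (disabled) {
			ret = -22;	/* -EINVAL */
			goto bail;	/* the fix: skip the reset entirely */
		}
		ret = do_reset();
	bail:
		return ret;
	}

	int main(void)
	{
		printf("disabled: %d, enabled: %d\n",
		       store_reset(1), store_reset(0));
		return 0;
	}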
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index b01809a82cb0..4a2d21e15a70 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -5582,9 +5582,16 @@ static void qsfp_7322_event(struct work_struct *work)
 	 * even on failure to read cable information.  We don't
 	 * get here for QME, so IS_QME check not needed here.
 	 */
-	le2 = (!ret && qd->cache.atten[1] >= qib_long_atten &&
-	       !ppd->dd->cspec->r1 && QSFP_IS_CU(qd->cache.tech)) ?
-		LE2_5m : LE2_DEFAULT;
+	if (!ret && !ppd->dd->cspec->r1) {
+		if (QSFP_IS_ACTIVE_FAR(qd->cache.tech))
+			le2 = LE2_QME;
+		else if (qd->cache.atten[1] >= qib_long_atten &&
+			 QSFP_IS_CU(qd->cache.tech))
+			le2 = LE2_5m;
+		else
+			le2 = LE2_DEFAULT;
+	} else
+		le2 = LE2_DEFAULT;
 	ibsd_wr_allchans(ppd, 13, (le2 << 7), BMASK(9, 7));
 	init_txdds_table(ppd, 0);
 }
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 5ad224e4a38b..8fd3df5bf04d 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -464,8 +464,9 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	memset(smp->data, 0, sizeof(smp->data));
 
 	/* Only return the mkey if the protection field allows it. */
-	if (smp->method == IB_MGMT_METHOD_SET || ibp->mkey == smp->mkey ||
-	    ibp->mkeyprot == 0)
+	if (!(smp->method == IB_MGMT_METHOD_GET &&
+	      ibp->mkey != smp->mkey &&
+	      ibp->mkeyprot == 1))
 		pip->mkey = ibp->mkey;
 	pip->gid_prefix = ibp->gid_prefix;
 	lid = ppd->lid;
@@ -705,7 +706,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	lwe = pip->link_width_enabled;
 	if (lwe) {
 		if (lwe == 0xFF)
-			lwe = ppd->link_width_supported;
+			set_link_width_enabled(ppd, ppd->link_width_supported);
 		else if (lwe >= 16 || (lwe & ~ppd->link_width_supported))
 			smp->status |= IB_SMP_INVALID_FIELD;
 		else if (lwe != ppd->link_width_enabled)
@@ -720,7 +721,8 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 		 * speeds.
 		 */
 		if (lse == 15)
-			lse = ppd->link_speed_supported;
+			set_link_speed_enabled(ppd,
+				ppd->link_speed_supported);
 		else if (lse >= 8 || (lse & ~ppd->link_speed_supported))
 			smp->status |= IB_SMP_INVALID_FIELD;
 		else if (lse != ppd->link_speed_enabled)
@@ -849,7 +851,7 @@ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
 	if (clientrereg)
 		pip->clientrereg_resv_subnetto |= 0x80;
 
-	goto done;
+	goto get_only;
 
 err:
 	smp->status |= IB_SMP_INVALID_FIELD;
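
The subn_get_portinfo() change rewrites the mkey visibility test via De Morgan: rather than listing the cases where the key is shown, it hides the key only in the one remaining case, a Get whose mkey does not match while mkeyprot is exactly 1 (the assumption, consistent with the earlier mkey check in this driver, is that mismatched Gets at higher protection levels are rejected before this point). A small sketch of the predicate with illustrative constants and a helper that is not the driver's API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define MGMT_METHOD_GET 0x01
	#define MGMT_METHOD_SET 0x02

	static bool mkey_visible(int method, uint64_t port_mkey,
				 uint64_t req_mkey, int mkeyprot)
	{
		return !(method == MGMT_METHOD_GET &&
			 port_mkey != req_mkey &&
			 mkeyprot == 1);
	}

	int main(void)
	{
		/* Hidden: mismatched Get at protection level 1. */
		printf("%d\n", mkey_visible(MGMT_METHOD_GET, 1, 2, 1)); /* 0 */
		/* Visible: a Set, a matching Get, or protection level 0. */
		printf("%d\n", mkey_visible(MGMT_METHOD_SET, 1, 2, 1)); /* 1 */
		printf("%d\n", mkey_visible(MGMT_METHOD_GET, 1, 1, 1)); /* 1 */
		printf("%d\n", mkey_visible(MGMT_METHOD_GET, 1, 2, 0)); /* 1 */
		return 0;
	}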
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 19b527bafd57..c109bbdc90ac 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -79,6 +79,8 @@
 extern const char *const qib_qsfp_devtech[16];
 /* Active Equalization includes fiber, copper full EQ, and copper near Eq */
 #define QSFP_IS_ACTIVE(tech) ((0xA2FF >> ((tech) >> 4)) & 1)
+/* Active Equalization includes fiber, copper full EQ, and copper far Eq */
+#define QSFP_IS_ACTIVE_FAR(tech) ((0x32FF >> ((tech) >> 4)) & 1)
 /* Attenuation should be valid for copper other than full/near Eq */
 #define QSFP_HAS_ATTEN(tech) ((0x4D00 >> ((tech) >> 4)) & 1)
 /* Length is only valid if technology is "copper" */