Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/cma.c               207
-rw-r--r--  drivers/infiniband/core/iwcm.c                2
-rw-r--r--  drivers/infiniband/core/ucma.c                7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c             46
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c        115
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h       36
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c        2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c              3
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c    9
-rw-r--r--  drivers/infiniband/hw/nes/nes_cm.c           16
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         2
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c       3
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c          5
13 files changed, 264 insertions(+), 189 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5ed9d25d021a..99dde874fbbd 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -148,6 +148,7 @@ struct rdma_id_private {
 	u32			qp_num;
 	u8			srq;
 	u8			tos;
+	u8			reuseaddr;
 };
 
 struct cma_multicast {
@@ -712,6 +713,21 @@ static inline int cma_any_addr(struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
+static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+{
+	if (src->sa_family != dst->sa_family)
+		return -1;
+
+	switch (src->sa_family) {
+	case AF_INET:
+		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+	default:
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+				     &((struct sockaddr_in6 *) dst)->sin6_addr);
+	}
+}
+
 static inline __be16 cma_port(struct sockaddr *addr)
 {
 	if (addr->sa_family == AF_INET)
@@ -1564,50 +1580,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
 	mutex_unlock(&lock);
 }
 
-int rdma_listen(struct rdma_cm_id *id, int backlog)
-{
-	struct rdma_id_private *id_priv;
-	int ret;
-
-	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
-		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
-		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
-		if (ret)
-			return ret;
-	}
-
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
-		return -EINVAL;
-
-	id_priv->backlog = backlog;
-	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			ret = cma_ib_listen(id_priv);
-			if (ret)
-				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			ret = cma_iw_listen(id_priv, backlog);
-			if (ret)
-				goto err;
-			break;
-		default:
-			ret = -ENOSYS;
-			goto err;
-		}
-	} else
-		cma_listen_on_all(id_priv);
-
-	return 0;
-err:
-	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_listen);
-
 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
 {
 	struct rdma_id_private *id_priv;
@@ -2090,6 +2062,25 @@ err:
 }
 EXPORT_SYMBOL(rdma_resolve_addr);
 
+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+{
+	struct rdma_id_private *id_priv;
+	unsigned long flags;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == CMA_IDLE) {
+		id_priv->reuseaddr = reuse;
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_set_reuseaddr);
+
 static void cma_bind_port(struct rdma_bind_list *bind_list,
 			  struct rdma_id_private *id_priv)
 {
@@ -2165,41 +2156,71 @@ retry:
 	return -EADDRNOTAVAIL;
 }
 
-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+/*
+ * Check that the requested port is available.  This is called when trying to
+ * bind to a specific port, or when trying to listen on a bound port.  In
+ * the latter case, the provided id_priv may already be on the bind_list, but
+ * we still need to check that it's okay to start listening.
+ */
+static int cma_check_port(struct rdma_bind_list *bind_list,
+			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
 {
 	struct rdma_id_private *cur_id;
-	struct sockaddr_in *sin, *cur_sin;
-	struct rdma_bind_list *bind_list;
+	struct sockaddr *addr, *cur_addr;
 	struct hlist_node *node;
+
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+	if (cma_any_addr(addr) && !reuseaddr)
+		return -EADDRNOTAVAIL;
+
+	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+		if (id_priv == cur_id)
+			continue;
+
+		if ((cur_id->state == CMA_LISTEN) ||
+		    !reuseaddr || !cur_id->reuseaddr) {
+			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+			if (cma_any_addr(cur_addr))
+				return -EADDRNOTAVAIL;
+
+			if (!cma_addr_cmp(addr, cur_addr))
+				return -EADDRINUSE;
+		}
+	}
+	return 0;
+}
+
+static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list;
 	unsigned short snum;
+	int ret;
 
-	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
-	snum = ntohs(sin->sin_port);
+	snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
 	bind_list = idr_find(ps, snum);
-	if (!bind_list)
-		return cma_alloc_port(ps, id_priv, snum);
-
-	/*
-	 * We don't support binding to any address if anyone is bound to
-	 * a specific address on the same port.
-	 */
-	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
-		return -EADDRNOTAVAIL;
-
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
-			return -EADDRNOTAVAIL;
-
-		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
-		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
-			return -EADDRINUSE;
+	if (!bind_list) {
+		ret = cma_alloc_port(ps, id_priv, snum);
+	} else {
+		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
+		if (!ret)
+			cma_bind_port(bind_list, id_priv);
 	}
+	return ret;
+}
 
-	cma_bind_port(bind_list, id_priv);
-	return 0;
+static int cma_bind_listen(struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list = id_priv->bind_list;
+	int ret = 0;
+
+	mutex_lock(&lock);
+	if (bind_list->owners.first->next)
+		ret = cma_check_port(bind_list, id_priv, 0);
+	mutex_unlock(&lock);
+	return ret;
 }
 
 static int cma_get_port(struct rdma_id_private *id_priv)
@@ -2253,6 +2274,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 	return 0;
 }
 
+int rdma_listen(struct rdma_cm_id *id, int backlog)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (id_priv->state == CMA_IDLE) {
+		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
+		if (ret)
+			return ret;
+	}
+
+	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
+		return -EINVAL;
+
+	if (id_priv->reuseaddr) {
+		ret = cma_bind_listen(id_priv);
+		if (ret)
+			goto err;
+	}
+
+	id_priv->backlog = backlog;
+	if (id->device) {
+		switch (rdma_node_get_transport(id->device->node_type)) {
+		case RDMA_TRANSPORT_IB:
+			ret = cma_ib_listen(id_priv);
+			if (ret)
+				goto err;
+			break;
+		case RDMA_TRANSPORT_IWARP:
+			ret = cma_iw_listen(id_priv, backlog);
+			if (ret)
+				goto err;
+			break;
+		default:
+			ret = -ENOSYS;
+			goto err;
+		}
+	} else
+		cma_listen_on_all(id_priv);
+
+	return 0;
+err:
+	id_priv->backlog = 0;
+	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_listen);
+
 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 {
 	struct rdma_id_private *id_priv;
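
For illustration, a minimal kernel-side sketch of the new reuseaddr flow (hypothetical caller, not part of this patch; my_handler, my_ctx, and my_sin are placeholders, and the three-argument rdma_create_id() of this kernel generation is assumed):

	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(my_handler, my_ctx, RDMA_PS_TCP);
	if (IS_ERR(id))
		return PTR_ERR(id);
	ret = rdma_set_reuseaddr(id, 1);	/* only valid while the id is CMA_IDLE */
	if (!ret)
		ret = rdma_bind_addr(id, (struct sockaddr *) &my_sin);
	if (!ret)
		ret = rdma_listen(id, 0);
	if (ret)
		rdma_destroy_id(id);
	return ret;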
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 2a1e9ae134b4..a9c042345c6f 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -725,7 +725,7 @@ static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
 	 */
 	clear_bit(IWCM_F_CONNECT_WAIT, &cm_id_priv->flags);
 	BUG_ON(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
-	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
+	if (iw_event->status == 0) {
 		cm_id_priv->id.local_addr = iw_event->local_addr;
 		cm_id_priv->id.remote_addr = iw_event->remote_addr;
 		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ec1e9da1488b..b3fa798525b2 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -883,6 +883,13 @@ static int ucma_set_option_id(struct ucma_context *ctx, int optname,
 		}
 		rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
 		break;
+	case RDMA_OPTION_ID_REUSEADDR:
+		if (optlen != sizeof(int)) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
+		break;
 	default:
 		ret = -ENOSYS;
 	}
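
From userspace, RDMA_OPTION_ID_REUSEADDR reaches this ucma handler through librdmacm's rdma_set_option(); a sketch, assuming a librdmacm new enough to define the option:

	#include <rdma/rdma_cma.h>

	int one = 1;

	/* set before rdma_bind_addr()/rdma_listen(), while the id is idle */
	if (rdma_set_option(id, RDMA_OPTION_ID, RDMA_OPTION_ID_REUSEADDR,
			    &one, sizeof one))
		perror("rdma_set_option(REUSEADDR)");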
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 9d8dcfab2b38..d7ee70fc9173 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1198,9 +1198,7 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 
 	return 0;
 }
@@ -1234,9 +1232,7 @@ static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
 	PDBG("%s ep %p\n", __func__, ep);
-	ep->com.wr_wait.ret = status2errno(rpl->status);
-	ep->com.wr_wait.done = 1;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 	return 0;
 }
 
@@ -1466,7 +1462,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int closing = 0;
+	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
 
@@ -1492,23 +1488,22 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		 * in rdma connection migration (see c4iw_accept_cr()).
 		 */
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case MPA_REP_SENT:
 		__state_set(&ep->com, CLOSING);
-		ep->com.wr_wait.done = 1;
-		ep->com.wr_wait.ret = -ECONNRESET;
 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
-		wake_up(&ep->com.wr_wait.wait);
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		break;
 	case FPDU_MODE:
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
-		closing = 1;
+		attrs.next_state = C4IW_QP_STATE_CLOSING;
+		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 		peer_close_upcall(ep);
+		disconnect = 1;
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -1536,11 +1531,6 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		BUG_ON(1);
 	}
 	mutex_unlock(&ep->com.mutex);
-	if (closing) {
-		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-	}
 	if (disconnect)
 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 	if (release)
@@ -1581,9 +1571,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
 	 */
-	ep->com.wr_wait.done = 1;
-	ep->com.wr_wait.ret = -ECONNRESET;
-	wake_up(&ep->com.wr_wait.wait);
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
@@ -1710,14 +1698,14 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = lookup_tid(t, tid);
 	BUG_ON(!ep);
 
-	if (ep->com.qp) {
+	if (ep && ep->com.qp) {
 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
 		       ep->com.qp->wq.sq.qid);
 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
 	} else
-		printk(KERN_WARNING MOD "TERM received tid %u no qp\n", tid);
+		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
 
 	return 0;
 }
@@ -2296,14 +2284,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
-		if (wr_waitp) {
-			if (ret)
-				wr_waitp->ret = -ret;
-			else
-				wr_waitp->ret = 0;
-			wr_waitp->done = 1;
-			wake_up(&wr_waitp->wait);
-		}
+		if (wr_waitp)
+			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
 	case 2:
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index e29172c2afcb..40a13cc633a3 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -44,7 +44,7 @@ MODULE_DESCRIPTION("Chelsio T4 RDMA Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRV_VERSION);
 
-static LIST_HEAD(dev_list);
+static LIST_HEAD(uld_ctx_list);
 static DEFINE_MUTEX(dev_mutex);
 
 static struct dentry *c4iw_debugfs_root;
@@ -370,18 +370,23 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 	c4iw_destroy_resource(&rdev->resource);
 }
 
-static void c4iw_remove(struct c4iw_dev *dev)
+struct uld_ctx {
+	struct list_head entry;
+	struct cxgb4_lld_info lldi;
+	struct c4iw_dev *dev;
+};
+
+static void c4iw_remove(struct uld_ctx *ctx)
 {
-	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	list_del(&dev->entry);
-	if (dev->registered)
-		c4iw_unregister_device(dev);
-	c4iw_rdev_close(&dev->rdev);
-	idr_destroy(&dev->cqidr);
-	idr_destroy(&dev->qpidr);
-	idr_destroy(&dev->mmidr);
-	iounmap(dev->rdev.oc_mw_kva);
-	ib_dealloc_device(&dev->ibdev);
+	PDBG("%s c4iw_dev %p\n", __func__, ctx->dev);
+	c4iw_unregister_device(ctx->dev);
+	c4iw_rdev_close(&ctx->dev->rdev);
+	idr_destroy(&ctx->dev->cqidr);
+	idr_destroy(&ctx->dev->qpidr);
+	idr_destroy(&ctx->dev->mmidr);
+	iounmap(ctx->dev->rdev.oc_mw_kva);
+	ib_dealloc_device(&ctx->dev->ibdev);
+	ctx->dev = NULL;
 }
 
 static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
@@ -392,7 +397,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp = (struct c4iw_dev *)ib_alloc_device(sizeof(*devp));
 	if (!devp) {
 		printk(KERN_ERR MOD "Cannot allocate ib device\n");
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 	}
 	devp->rdev.lldi = *infop;
 
@@ -402,27 +407,23 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.oc_mw_kva = ioremap_wc(devp->rdev.oc_mw_pa,
 					  devp->rdev.lldi.vr->ocq.size);
 
-	printk(KERN_INFO MOD "ocq memory: "
+	PDBG(KERN_INFO MOD "ocq memory: "
 	       "hw_start 0x%x size %u mw_pa 0x%lx mw_kva %p\n",
 	       devp->rdev.lldi.vr->ocq.start, devp->rdev.lldi.vr->ocq.size,
 	       devp->rdev.oc_mw_pa, devp->rdev.oc_mw_kva);
 
-	mutex_lock(&dev_mutex);
-
 	ret = c4iw_rdev_open(&devp->rdev);
 	if (ret) {
 		mutex_unlock(&dev_mutex);
 		printk(KERN_ERR MOD "Unable to open CXIO rdev err %d\n", ret);
 		ib_dealloc_device(&devp->ibdev);
-		return NULL;
+		return ERR_PTR(ret);
 	}
 
 	idr_init(&devp->cqidr);
 	idr_init(&devp->qpidr);
 	idr_init(&devp->mmidr);
 	spin_lock_init(&devp->lock);
-	list_add_tail(&devp->entry, &dev_list);
-	mutex_unlock(&dev_mutex);
 
 	if (c4iw_debugfs_root) {
 		devp->debugfs_root = debugfs_create_dir(
@@ -435,7 +436,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 
 static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 {
-	struct c4iw_dev *dev;
+	struct uld_ctx *ctx;
 	static int vers_printed;
 	int i;
 
@@ -443,25 +444,33 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 		printk(KERN_INFO MOD "Chelsio T4 RDMA Driver - version %s\n",
 		       DRV_VERSION);
 
-	dev = c4iw_alloc(infop);
-	if (!dev)
+	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
+	if (!ctx) {
+		ctx = ERR_PTR(-ENOMEM);
 		goto out;
+	}
+	ctx->lldi = *infop;
 
 	PDBG("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-	     __func__, pci_name(dev->rdev.lldi.pdev),
-	     dev->rdev.lldi.nchan, dev->rdev.lldi.nrxq,
-	     dev->rdev.lldi.ntxq, dev->rdev.lldi.nports);
+	     __func__, pci_name(ctx->lldi.pdev),
+	     ctx->lldi.nchan, ctx->lldi.nrxq,
+	     ctx->lldi.ntxq, ctx->lldi.nports);
+
+	mutex_lock(&dev_mutex);
+	list_add_tail(&ctx->entry, &uld_ctx_list);
+	mutex_unlock(&dev_mutex);
 
-	for (i = 0; i < dev->rdev.lldi.nrxq; i++)
-		PDBG("rxqid[%u] %u\n", i, dev->rdev.lldi.rxq_ids[i]);
+	for (i = 0; i < ctx->lldi.nrxq; i++)
+		PDBG("rxqid[%u] %u\n", i, ctx->lldi.rxq_ids[i]);
 out:
-	return dev;
+	return ctx;
 }
 
 static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,
 			       const struct pkt_gl *gl)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
+	struct c4iw_dev *dev = ctx->dev;
 	struct sk_buff *skb;
 	const struct cpl_act_establish *rpl;
 	unsigned int opcode;
@@ -503,47 +512,49 @@ nomem:
 
 static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
-	struct c4iw_dev *dev = handle;
+	struct uld_ctx *ctx = handle;
 
 	PDBG("%s new_state %u\n", __func__, new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
-		printk(KERN_INFO MOD "%s: Up\n", pci_name(dev->rdev.lldi.pdev));
-		if (!dev->registered) {
-			int ret;
-			ret = c4iw_register_device(dev);
-			if (ret)
+		printk(KERN_INFO MOD "%s: Up\n", pci_name(ctx->lldi.pdev));
+		if (!ctx->dev) {
+			int ret = 0;
+
+			ctx->dev = c4iw_alloc(&ctx->lldi);
+			if (!IS_ERR(ctx->dev))
+				ret = c4iw_register_device(ctx->dev);
+			if (IS_ERR(ctx->dev) || ret)
 				printk(KERN_ERR MOD
 				       "%s: RDMA registration failed: %d\n",
-				       pci_name(dev->rdev.lldi.pdev), ret);
+				       pci_name(ctx->lldi.pdev), ret);
 		}
 		break;
 	case CXGB4_STATE_DOWN:
 		printk(KERN_INFO MOD "%s: Down\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
-			c4iw_unregister_device(dev);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		dev->rdev.flags |= T4_FATAL_ERROR;
-		if (dev->registered) {
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev) {
 			struct ib_event event;
 
+			ctx->dev->rdev.flags |= T4_FATAL_ERROR;
 			memset(&event, 0, sizeof event);
 			event.event = IB_EVENT_DEVICE_FATAL;
-			event.device = &dev->ibdev;
+			event.device = &ctx->dev->ibdev;
 			ib_dispatch_event(&event);
-			c4iw_unregister_device(dev);
+			c4iw_remove(ctx);
 		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
-		       pci_name(dev->rdev.lldi.pdev));
-		mutex_lock(&dev_mutex);
-		c4iw_remove(dev);
-		mutex_unlock(&dev_mutex);
+		       pci_name(ctx->lldi.pdev));
+		if (ctx->dev)
+			c4iw_remove(ctx);
 		break;
 	}
 	return 0;
@@ -576,11 +587,13 @@ static int __init c4iw_init_module(void)
 
 static void __exit c4iw_exit_module(void)
 {
-	struct c4iw_dev *dev, *tmp;
+	struct uld_ctx *ctx, *tmp;
 
 	mutex_lock(&dev_mutex);
-	list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
-		c4iw_remove(dev);
+	list_for_each_entry_safe(ctx, tmp, &uld_ctx_list, entry) {
+		if (ctx->dev)
+			c4iw_remove(ctx);
+		kfree(ctx);
 	}
 	mutex_unlock(&dev_mutex);
 	cxgb4_unregister_uld(CXGB4_ULD_RDMA);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 9f6166f59268..35d2a5dd9bb4 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -131,42 +131,58 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 
 #define C4IW_WR_TO (10*HZ)
 
+enum {
+	REPLY_READY = 0,
+};
+
 struct c4iw_wr_wait {
 	wait_queue_head_t wait;
-	int done;
+	unsigned long status;
 	int ret;
 };
 
 static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp)
 {
 	wr_waitp->ret = 0;
-	wr_waitp->done = 0;
+	wr_waitp->status = 0;
 	init_waitqueue_head(&wr_waitp->wait);
 }
 
+static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret)
+{
+	wr_waitp->ret = ret;
+	set_bit(REPLY_READY, &wr_waitp->status);
+	wake_up(&wr_waitp->wait);
+}
+
 static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 				      struct c4iw_wr_wait *wr_waitp,
 				      u32 hwtid, u32 qpid,
 				      const char *func)
 {
 	unsigned to = C4IW_WR_TO;
-	do {
+	int ret;
 
-		wait_event_timeout(wr_waitp->wait, wr_waitp->done, to);
-		if (!wr_waitp->done) {
+	do {
+		ret = wait_event_timeout(wr_waitp->wait,
+			test_and_clear_bit(REPLY_READY, &wr_waitp->status), to);
+		if (!ret) {
 			printk(KERN_ERR MOD "%s - Device %s not responding - "
 			       "tid %u qpid %u\n", func,
 			       pci_name(rdev->lldi.pdev), hwtid, qpid);
+			if (c4iw_fatal_error(rdev)) {
+				wr_waitp->ret = -EIO;
+				break;
+			}
 			to = to << 2;
 		}
-	} while (!wr_waitp->done);
+	} while (!ret);
 	if (wr_waitp->ret)
-		printk(KERN_WARNING MOD "%s: FW reply %d tid %u qpid %u\n",
-		       pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
+		PDBG("%s: FW reply %d tid %u qpid %u\n",
+		     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
 	return wr_waitp->ret;
 }
 
-
 struct c4iw_dev {
 	struct ib_device ibdev;
 	struct c4iw_rdev rdev;
@@ -175,9 +191,7 @@ struct c4iw_dev {
 	struct idr qpidr;
 	struct idr mmidr;
 	spinlock_t lock;
-	struct list_head entry;
 	struct dentry *debugfs_root;
-	u8 registered;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
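
The header change above replaces the racy 'done' flag with an atomic bit that the waiter consumes, keyed off wait_event_timeout()'s return value. A generic sketch of the pattern (illustrative names, not driver code):

	struct reply_wait {
		wait_queue_head_t wait;
		unsigned long status;		/* bit 0: reply ready */
		int ret;
	};

	static void reply_wake(struct reply_wait *w, int ret)
	{
		w->ret = ret;
		set_bit(0, &w->status);		/* publish result before waking */
		wake_up(&w->wait);
	}

	static int reply_wait_for(struct reply_wait *w, unsigned long to)
	{
		/* test_and_clear_bit() consumes the event, so a reply left
		 * over from an earlier exchange cannot satisfy this wait. */
		if (!wait_event_timeout(w->wait,
					test_and_clear_bit(0, &w->status), to))
			return -ETIMEDOUT;
		return w->ret;
	}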
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index f66dd8bf5128..5b9e4220ca08 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -516,7 +516,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 		if (ret)
 			goto bail2;
 	}
-	dev->registered = 1;
 	return 0;
 bail2:
 	ib_unregister_device(&dev->ibdev);
@@ -535,6 +534,5 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 				   c4iw_class_attributes[i]);
 	ib_unregister_device(&dev->ibdev);
 	kfree(dev->ibdev.iwcm);
-	dev->registered = 0;
 	return;
 }
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 70a5a3c646da..3b773b05a898 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -214,7 +214,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_HOSTFCMODE(0) |	/* no host cidx updates */
 		V_FW_RI_RES_WR_CPRIO(0) |	/* don't keep in chip cache */
 		V_FW_RI_RES_WR_PCIECHN(0) |	/* set by uP at ri_init time */
-		t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
+		(t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0) |
 		V_FW_RI_RES_WR_IQID(scq->cqid));
 	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
 		V_FW_RI_RES_WR_DCAEN(0) |
@@ -1210,7 +1210,6 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		if (ret) {
 			if (internal)
 				c4iw_get_ep(&qhp->ep->com);
-			disconnect = abort = 1;
 			goto err;
 		}
 		break;
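
The added parentheses in create_qp() matter because '|' binds tighter than '?:' in C, so the old expression

	t4_sq_onchip(&wq->sq) ? F_FW_RI_RES_WR_ONCHIP : 0 |
	V_FW_RI_RES_WR_IQID(scq->cqid)

parsed as cond ? F_FW_RI_RES_WR_ONCHIP : (0 | V_FW_RI_RES_WR_IQID(...)), silently dropping the IQID field whenever the SQ was on-chip.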
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 58c0e417bc30..be24ac726114 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -398,7 +398,6 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 	struct ipath_devdata *dd;
 	unsigned long long addr;
 	u32 bar0 = 0, bar1 = 0;
-	u8 rev;
 
 	dd = ipath_alloc_devdata(pdev);
 	if (IS_ERR(dd)) {
@@ -540,13 +539,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
 		goto bail_regions;
 	}
 
-	ret = pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
-	if (ret) {
-		ipath_dev_err(dd, "Failed to read PCI revision ID unit "
-			      "%u: err %d\n", dd->ipath_unit, -ret);
-		goto bail_regions;	/* shouldn't ever happen */
-	}
-	dd->ipath_pcirev = rev;
+	dd->ipath_pcirev = pdev->revision;
 
 #if defined(__powerpc__)
 	/* There isn't a generic way to specify writethrough mappings */
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 33c7eedaba6c..e74cdf9ef471 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -2563,7 +2563,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	u16 last_ae;
 	u8 original_hw_tcp_state;
 	u8 original_ibqp_state;
-	enum iw_cm_event_status disconn_status = IW_CM_EVENT_STATUS_OK;
+	int disconn_status = 0;
 	int issue_disconn = 0;
 	int issue_close = 0;
 	int issue_flush = 0;
@@ -2605,7 +2605,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 	    (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
 		issue_disconn = 1;
 		if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
-			disconn_status = IW_CM_EVENT_STATUS_RESET;
+			disconn_status = -ECONNRESET;
 	}
 
 	if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
@@ -2666,7 +2666,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
 		cm_id->provider_data = nesqp;
 		/* Send up the close complete event */
 		cm_event.event = IW_CM_EVENT_CLOSE;
-		cm_event.status = IW_CM_EVENT_STATUS_OK;
+		cm_event.status = 0;
 		cm_event.provider_data = cm_id->provider_data;
 		cm_event.local_addr = cm_id->local_addr;
 		cm_event.remote_addr = cm_id->remote_addr;
@@ -2966,7 +2966,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	nes_add_ref(&nesqp->ibqp);
 
 	cm_event.event = IW_CM_EVENT_ESTABLISHED;
-	cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+	cm_event.status = 0;
 	cm_event.provider_data = (void *)nesqp;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3377,7 +3377,7 @@ static void cm_event_connected(struct nes_cm_event *event)
 
 	/* notify OF layer we successfully created the requested connection */
 	cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-	cm_event.status = IW_CM_EVENT_STATUS_ACCEPTED;
+	cm_event.status = 0;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr.sin_family = AF_INET;
 	cm_event.local_addr.sin_port = cm_id->local_addr.sin_port;
@@ -3484,7 +3484,7 @@ static void cm_event_reset(struct nes_cm_event *event)
 	nesqp->cm_id = NULL;
 	/* cm_id->provider_data = NULL; */
 	cm_event.event = IW_CM_EVENT_DISCONNECT;
-	cm_event.status = IW_CM_EVENT_STATUS_RESET;
+	cm_event.status = -ECONNRESET;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3495,7 +3495,7 @@ static void cm_event_reset(struct nes_cm_event *event)
 	ret = cm_id->event_handler(cm_id, &cm_event);
 	atomic_inc(&cm_closes);
 	cm_event.event = IW_CM_EVENT_CLOSE;
-	cm_event.status = IW_CM_EVENT_STATUS_OK;
+	cm_event.status = 0;
 	cm_event.provider_data = cm_id->provider_data;
 	cm_event.local_addr = cm_id->local_addr;
 	cm_event.remote_addr = cm_id->remote_addr;
@@ -3534,7 +3534,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
 		cm_node, cm_id, jiffies);
 
 	cm_event.event = IW_CM_EVENT_CONNECT_REQUEST;
-	cm_event.status = IW_CM_EVENT_STATUS_OK;
+	cm_event.status = 0;
 	cm_event.provider_data = (void *)cm_node;
 
 	cm_event.local_addr.sin_family = AF_INET;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 26d8018c0a7c..95ca93ceedac 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1484,7 +1484,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
 	    (nesqp->ibqp_state == IB_QPS_RTR)) && (nesqp->cm_id)) {
 		cm_id = nesqp->cm_id;
 		cm_event.event = IW_CM_EVENT_CONNECT_REPLY;
-		cm_event.status = IW_CM_EVENT_STATUS_TIMEOUT;
+		cm_event.status = -ETIMEDOUT;
 		cm_event.local_addr = cm_id->local_addr;
 		cm_event.remote_addr = cm_id->remote_addr;
 		cm_event.private_data = NULL;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 6bab3eaea70f..9f53e68a096a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -7534,7 +7534,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
 	ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
 	tstart = get_jiffies_64();
 	while (chan_done &&
-	       !time_after64(tstart, tstart + msecs_to_jiffies(500))) {
+	       !time_after64(get_jiffies_64(),
+			tstart + msecs_to_jiffies(500))) {
 		msleep(20);
 		for (chan = 0; chan < SERDES_CHANS; ++chan) {
 			rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
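
The fix above addresses a constant comparison: time_after64(tstart, tstart + msecs_to_jiffies(500)) can never be true, so the calibration loop was bounded only by chan_done. Re-sampling the clock gives the intended 500 ms cap; a stripped-down sketch:

	u64 tstart = get_jiffies_64();

	while (!done &&
	       !time_after64(get_jiffies_64(),
			     tstart + msecs_to_jiffies(500)))
		msleep(20);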
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 48b6674cbc49..891cc2ff5f00 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -526,11 +526,8 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
 	 */
 	devid = parent->device;
 	if (devid >= 0x25e2 && devid <= 0x25fa) {
-		u8 rev;
-
 		/* 5000 P/V/X/Z */
-		pci_read_config_byte(parent, PCI_REVISION_ID, &rev);
-		if (rev <= 0xb2)
+		if (parent->revision <= 0xb2)
 			bits = 1U << 10;
 		else
 			bits = 7U << 10;