 drivers/infiniband/core/cm.c              | 20
 drivers/infiniband/core/cma.c             | 58
 drivers/infiniband/hw/cxgb4/cm.c          |  6
 drivers/infiniband/hw/cxgb4/device.c      | 24
 drivers/infiniband/hw/cxgb4/iw_cxgb4.h    |  1
 drivers/infiniband/hw/cxgb4/qp.c          |  4
 drivers/infiniband/hw/cxgb4/t4.h          |  8
 drivers/infiniband/hw/ipath/ipath_sysfs.c |  1
 drivers/net/cxgb4/t4_msg.h                |  1
 9 files changed, 83 insertions(+), 40 deletions(-)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 64e0903091a8..f804e28e1ebb 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1988,6 +1988,10 @@ int ib_send_cm_dreq(struct ib_cm_id *cm_id,
 		goto out;
 	}
 
+	if (cm_id->lap_state == IB_CM_LAP_SENT ||
+	    cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
+		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+
 	ret = cm_alloc_msg(cm_id_priv, &msg);
 	if (ret) {
 		cm_enter_timewait(cm_id_priv);
@@ -2129,6 +2133,10 @@ static int cm_dreq_handler(struct cm_work *work)
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		break;
 	case IB_CM_ESTABLISHED:
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
+		    cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
+			ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
+		break;
 	case IB_CM_MRA_REP_RCVD:
 		break;
 	case IB_CM_TIMEWAIT:
@@ -2349,9 +2357,18 @@ static int cm_rej_handler(struct cm_work *work)
 		/* fall through */
 	case IB_CM_REP_RCVD:
 	case IB_CM_MRA_REP_SENT:
-	case IB_CM_ESTABLISHED:
 		cm_enter_timewait(cm_id_priv);
 		break;
+	case IB_CM_ESTABLISHED:
+		if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
+		    cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
+			if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
+				ib_cancel_mad(cm_id_priv->av.port->mad_agent,
+					      cm_id_priv->msg);
+			cm_enter_timewait(cm_id_priv);
+			break;
+		}
+		/* fall through */
 	default:
 		spin_unlock_irq(&cm_id_priv->lock);
 		ret = -EINVAL;
@@ -2989,6 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
 		goto out; /* No match. */
 	}
 	atomic_inc(&cur_cm_id_priv->refcount);
+	atomic_inc(&cm_id_priv->refcount);
 	spin_unlock_irq(&cm.lock);
 
 	cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
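
The SIDR hunk above takes a reference on cm_id_priv while cm.lock is still held, so the id cannot be freed the moment the lock is dropped. A minimal userspace sketch of that take-a-reference-under-the-lock pattern follows; all names here are illustrative, not the kernel API:

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table_entry;	/* stands in for the cm.lock-protected lookup */

static void obj_put(struct obj *o)
{
	/* Free only when the last reference is dropped. */
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

static struct obj *lookup_and_get(void)
{
	struct obj *o;

	pthread_mutex_lock(&table_lock);
	o = table_entry;
	if (o)
		atomic_fetch_add(&o->refcount, 1);	/* ref taken under the lock */
	pthread_mutex_unlock(&table_lock);
	return o;	/* safe to use until obj_put() */
}

int main(void)
{
	struct obj *o;

	table_entry = calloc(1, sizeof(*table_entry));
	atomic_store(&table_entry->refcount, 1);	/* the table's reference */

	o = lookup_and_get();	/* refcount now 2 */
	/* ... use o; another thread may concurrently drop the table's ref ... */
	obj_put(o);
	obj_put(table_entry);	/* drop the table's reference, frees the object */
	return 0;
}

The point is the ordering: the reference is acquired before the lock that made the lookup valid is released, so there is no window in which the object is visible but unpinned.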
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 6884da24fde1..5ed9d25d021a 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -308,11 +308,13 @@ static inline void release_mc(struct kref *kref)
 	kfree(mc);
 }
 
-static void cma_detach_from_dev(struct rdma_id_private *id_priv)
+static void cma_release_dev(struct rdma_id_private *id_priv)
 {
+	mutex_lock(&lock);
 	list_del(&id_priv->list);
 	cma_deref_dev(id_priv->cma_dev);
 	id_priv->cma_dev = NULL;
+	mutex_unlock(&lock);
 }
 
 static int cma_set_qkey(struct rdma_id_private *id_priv)
@@ -373,6 +375,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
+	mutex_lock(&lock);
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
 	       rdma_addr_gid_offset(dev_addr), sizeof gid);
@@ -398,6 +401,7 @@ out:
 	if (!ret)
 		cma_attach_to_dev(id_priv, cma_dev);
 
+	mutex_unlock(&lock);
 	return ret;
 }
 
@@ -904,9 +908,14 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	state = cma_exch(id_priv, CMA_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
-	mutex_lock(&lock);
+	/*
+	 * Wait for any active callback to finish.  New callbacks will find
+	 * the id_priv state set to destroying and abort.
+	 */
+	mutex_lock(&id_priv->handler_mutex);
+	mutex_unlock(&id_priv->handler_mutex);
+
 	if (id_priv->cma_dev) {
-		mutex_unlock(&lock);
 		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
 			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
@@ -920,10 +929,8 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 			break;
 		}
 		cma_leave_mc_groups(id_priv);
-		mutex_lock(&lock);
-		cma_detach_from_dev(id_priv);
+		cma_release_dev(id_priv);
 	}
-	mutex_unlock(&lock);
 
 	cma_release_port(id_priv);
 	cma_deref_id(id_priv);
@@ -1200,9 +1207,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	}
 
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
-	mutex_unlock(&lock);
 	if (ret)
 		goto release_conn_id;
 
@@ -1210,6 +1215,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	cm_id->context = conn_id;
 	cm_id->cm_handler = cma_ib_handler;
 
+	/*
+	 * Protect against the user destroying conn_id from another thread
+	 * until we're done accessing it.
+	 */
+	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (!ret) {
 		/*
@@ -1222,8 +1232,10 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
 		mutex_unlock(&conn_id->handler_mutex);
+		cma_deref_id(conn_id);
 		goto out;
 	}
+	cma_deref_id(conn_id);
 
 	/* Destroy the CM ID by returning a non-zero value. */
 	conn_id->cm_id.ib = NULL;
@@ -1394,9 +1406,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		goto out;
 	}
 
-	mutex_lock(&lock);
 	ret = cma_acquire_dev(conn_id);
-	mutex_unlock(&lock);
 	if (ret) {
 		mutex_unlock(&conn_id->handler_mutex);
 		rdma_destroy_id(new_cm_id);
@@ -1425,17 +1435,25 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	event.param.conn.private_data_len = iw_event->private_data_len;
 	event.param.conn.initiator_depth = attr.max_qp_init_rd_atom;
 	event.param.conn.responder_resources = attr.max_qp_rd_atom;
+
+	/*
+	 * Protect against the user destroying conn_id from another thread
+	 * until we're done accessing it.
+	 */
+	atomic_inc(&conn_id->refcount);
 	ret = conn_id->id.event_handler(&conn_id->id, &event);
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
+		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
 		goto out;
 	}
 
 	mutex_unlock(&conn_id->handler_mutex);
+	cma_deref_id(conn_id);
 
 out:
 	if (dev)
@@ -1951,20 +1969,11 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-
-	/*
-	 * Grab mutex to block rdma_destroy_id() from removing the device while
-	 * we're trying to acquire it.
-	 */
-	mutex_lock(&lock);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) {
-		mutex_unlock(&lock);
+	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
 		goto out;
-	}
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
-	mutex_unlock(&lock);
 
 	if (status) {
 		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
@@ -2265,9 +2274,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (ret)
 			goto err1;
 
-		mutex_lock(&lock);
 		ret = cma_acquire_dev(id_priv);
-		mutex_unlock(&lock);
 		if (ret)
 			goto err1;
 	}
@@ -2279,11 +2286,8 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 
 	return 0;
 err2:
-	if (id_priv->cma_dev) {
-		mutex_lock(&lock);
-		cma_detach_from_dev(id_priv);
-		mutex_unlock(&lock);
-	}
+	if (id_priv->cma_dev)
+		cma_release_dev(id_priv);
err1:
 	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
 	return ret;
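
Two patterns recur in the cma.c changes: rdma_destroy_id() now locks and immediately unlocks handler_mutex as a barrier that waits out any callback already running, and the request handlers bump the id's refcount before invoking the user's event handler so a destroy triggered from another thread cannot free conn_id mid-callback. A rough userspace sketch of both, with pthread stand-ins and illustrative names only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct id_priv {
	pthread_mutex_t handler_mutex;
	atomic_int refcount;		/* starts at 1, held by the creator */
	atomic_bool destroying;
};

static void deref_id(struct id_priv *id)
{
	if (atomic_fetch_sub(&id->refcount, 1) == 1)
		free(id);
}

static void destroy_id(struct id_priv *id)
{
	atomic_store(&id->destroying, true);
	/* Barrier: once we get the mutex, no callback is mid-flight. */
	pthread_mutex_lock(&id->handler_mutex);
	pthread_mutex_unlock(&id->handler_mutex);
	deref_id(id);			/* drop the creator's reference */
}

static void event_callback(struct id_priv *id)
{
	pthread_mutex_lock(&id->handler_mutex);
	if (atomic_load(&id->destroying)) {	/* new callbacks abort */
		pthread_mutex_unlock(&id->handler_mutex);
		return;
	}
	atomic_fetch_add(&id->refcount, 1);	/* pin across the user handler */
	/* ... run the user handler; it may trigger destroy_id() elsewhere ... */
	pthread_mutex_unlock(&id->handler_mutex);
	deref_id(id);				/* drop the callback's pin */
}

int main(void)
{
	struct id_priv *id = calloc(1, sizeof(*id));

	pthread_mutex_init(&id->handler_mutex, NULL);
	atomic_store(&id->refcount, 1);
	event_callback(id);	/* would normally run from another thread */
	destroy_id(id);
	return 0;
}

In the kernel code the same roles are played by handler_mutex, the CMA_DESTROYING state, and cma_deref_id().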
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 8b00e6c46f01..b4d9e4caf3c9 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -61,9 +61,9 @@ static char *states[] = {
 	NULL,
 };
 
-static int dack_mode;
+static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
-MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
+MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
 int c4iw_max_read_depth = 8;
 module_param(c4iw_max_read_depth, int, 0644);
@@ -482,6 +482,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
@@ -1274,6 +1275,7 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
+	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 54fbc1118abe..e29172c2afcb 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -87,17 +87,22 @@ static int dump_qp(int id, void *p, void *data)
 		return 1;
 
 	if (qp->ep)
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u "
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u "
 			      "ep tid %u state %u %pI4:%u->%pI4:%u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state,
+			      qp->wq.sq.qid, qp->wq.rq.qid, (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP,
 			      qp->ep->hwtid, (int)qp->ep->com.state,
 			      &qp->ep->com.local_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.local_addr.sin_port),
 			      &qp->ep->com.remote_addr.sin_addr.s_addr,
 			      ntohs(qp->ep->com.remote_addr.sin_port));
 	else
-		cc = snprintf(qpd->buf + qpd->pos, space, "qp id %u state %u\n",
-			      qp->wq.sq.qid, (int)qp->attr.state);
+		cc = snprintf(qpd->buf + qpd->pos, space,
+			      "qp sq id %u rq id %u state %u onchip %u\n",
+			      qp->wq.sq.qid, qp->wq.rq.qid,
+			      (int)qp->attr.state,
+			      qp->wq.sq.flags & T4_SQ_ONCHIP);
 	if (cc < space)
 		qpd->pos += cc;
 	return 0;
@@ -368,7 +373,6 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 static void c4iw_remove(struct c4iw_dev *dev)
 {
 	PDBG("%s c4iw_dev %p\n", __func__, dev);
-	cancel_delayed_work_sync(&dev->db_drop_task);
 	list_del(&dev->entry);
 	if (dev->registered)
 		c4iw_unregister_device(dev);
@@ -523,8 +527,16 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 	case CXGB4_STATE_START_RECOVERY:
 		printk(KERN_INFO MOD "%s: Fatal Error\n",
 		       pci_name(dev->rdev.lldi.pdev));
-		if (dev->registered)
+		dev->rdev.flags |= T4_FATAL_ERROR;
+		if (dev->registered) {
+			struct ib_event event;
+
+			memset(&event, 0, sizeof event);
+			event.event = IB_EVENT_DEVICE_FATAL;
+			event.device = &dev->ibdev;
+			ib_dispatch_event(&event);
 			c4iw_unregister_device(dev);
+		}
 		break;
 	case CXGB4_STATE_DETACH:
 		printk(KERN_INFO MOD "%s: Detach\n",
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 2fe19ec9ba60..9f6166f59268 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -176,7 +176,6 @@ struct c4iw_dev {
 	struct idr mmidr;
 	spinlock_t lock;
 	struct list_head entry;
-	struct delayed_work db_drop_task;
 	struct dentry *debugfs_root;
 	u8 registered;
 };
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 4f0be25cab1a..70a5a3c646da 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -31,9 +31,9 @@
  */
 #include "iw_cxgb4.h"
 
-static int ocqp_support;
+static int ocqp_support = 1;
 module_param(ocqp_support, int, 0644);
-MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=0)");
+MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");
 
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 70004425d695..24af12fc8228 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -507,8 +507,14 @@ static inline void t4_swcq_consume(struct t4_cq *cq)
 static inline void t4_hwcq_consume(struct t4_cq *cq)
 {
 	cq->bits_type_ts = cq->queue[cq->cidx].bits_type_ts;
-	if (++cq->cidx_inc == cq->size)
+	if (++cq->cidx_inc == (cq->size >> 4)) {
+		u32 val;
+
+		val = SEINTARM(0) | CIDXINC(cq->cidx_inc) | TIMERREG(7) |
+		      INGRESSQID(cq->cqid);
+		writel(val, cq->gts);
 		cq->cidx_inc = 0;
+	}
 	if (++cq->cidx == cq->size) {
 		cq->cidx = 0;
 		cq->gen ^= 1;
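
The t4_hwcq_consume() change stops accumulating a full ring's worth of consumed entries before reporting them; credits now go back to the hardware every size/16 entries via the GTS doorbell. A standalone sketch of that arithmetic, with the doorbell write stubbed out and all names illustrative:

#include <stdint.h>
#include <stdio.h>

struct sim_cq {
	uint32_t size;		/* number of CQ entries (assume >= 16) */
	uint32_t cidx;		/* consumer index */
	uint32_t cidx_inc;	/* entries consumed but not yet reported */
};

static void gts_write(struct sim_cq *cq, uint32_t inc)
{
	/* Stand-in for writel(SEINTARM(0) | CIDXINC(inc) | ..., cq->gts) */
	printf("doorbell: return %u credits\n", inc);
}

static void hwcq_consume(struct sim_cq *cq)
{
	if (++cq->cidx_inc == (cq->size >> 4)) {
		gts_write(cq, cq->cidx_inc);
		cq->cidx_inc = 0;	/* credits handed back to hardware */
	}
	if (++cq->cidx == cq->size)
		cq->cidx = 0;		/* wrap; generation bit omitted here */
}

int main(void)
{
	struct sim_cq cq = { .size = 64 };

	for (int i = 0; i < 64; i++)
		hwcq_consume(&cq);	/* returns 4 credits every 4 entries */
	return 0;
}

Returning credits in size/16 chunks keeps the hardware's view of the consumer index current and keeps each CIDXINC increment small; that the doorbell field's width is the motivating constraint is an assumption here, not something the diff states.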
diff --git a/drivers/infiniband/hw/ipath/ipath_sysfs.c b/drivers/infiniband/hw/ipath/ipath_sysfs.c
index b8cb2f145ae4..8991677e9a08 100644
--- a/drivers/infiniband/hw/ipath/ipath_sysfs.c
+++ b/drivers/infiniband/hw/ipath/ipath_sysfs.c
@@ -557,6 +557,7 @@ static ssize_t store_reset(struct device *dev,
 		dev_info(dev,"Unit %d is disabled, can't reset\n",
 			 dd->ipath_unit);
 		ret = -EINVAL;
+		goto bail;
 	}
 	ret = ipath_reset_device(dd->ipath_unit);
 bail:
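
The ipath fix is a missing early exit: without the goto, the -EINVAL set for a disabled unit falls through and is immediately overwritten by the ipath_reset_device() call the check was meant to prevent. The shape of the bug as a self-contained sketch, with stand-in names:

#include <errno.h>
#include <stdio.h>

static int reset_device(int unit)	/* stub for ipath_reset_device() */
{
	printf("resetting unit %d\n", unit);
	return 0;
}

static int store_reset_sketch(int unit, int disabled)
{
	int ret;

	if (disabled) {
		ret = -EINVAL;
		goto bail;	/* the missing statement: skip the reset */
	}
	ret = reset_device(unit);
bail:
	return ret;
}

int main(void)
{
	/* A disabled unit must report -EINVAL, not be reset anyway. */
	return store_reset_sketch(0, 1) == -EINVAL ? 0 : 1;
}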
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
index a550d0c706f3..eb71b8250b91 100644
--- a/drivers/net/cxgb4/t4_msg.h
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -123,6 +123,7 @@ enum {
 	ULP_MODE_NONE   = 0,
 	ULP_MODE_ISCSI  = 2,
 	ULP_MODE_RDMA   = 4,
+	ULP_MODE_TCPDDP = 5,
 	ULP_MODE_FCOE   = 6,
 };
 
