author     Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-15 12:52:31 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2007-05-15 12:52:31 -0400
commit     de7860c3f3272086a4c3a1b4280b11ffae7c32be
tree       450f3cae68e5349e853c6bb201facfa60c636a90
parent     cfbf07f2a80b618c42a42c20d83647ea8fcceca0
parent     7c5b9ef8577bfa7b74ea58fc9ff2934ffce13532
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB/cm: Optimize stale connection detection
IB/mthca: Set cleaned CQEs back to HW ownership when cleaning CQ
IB/mthca: Fix posting >255 recv WRs for Tavor
RDMA/cma: Add check to validate that cm_id is bound to a device
RDMA/cma: Fix synchronization with device removal in cma_iw_handler
RDMA/cma: Simplify device removal handling code
IB/ehca: Disable scaling code by default, bump version number
IB/ehca: Beautify sysfs attribute code and fix compiler warnings
IB/ehca: Remove _irqsave, move #ifdef
IB/ehca: Fix AQP0/1 QP number
IB/ehca: Correctly set GRH mask bit in ehca_modify_qp()
IB/ehca: Serialize hypervisor calls in ehca_register_mr()
IB/ipath: Shadow the gpio_mask register
IB/mlx4: Fix uninitialized spinlock for 32-bit archs
mlx4_core: Remove unused doorbell_lock
net: Trivial MLX4_DEBUG dependency fix.
 drivers/infiniband/core/cma.c               | 106
 drivers/infiniband/hw/ehca/ehca_classes.h   |   1
 drivers/infiniband/hw/ehca/ehca_irq.c       |   7
 drivers/infiniband/hw/ehca/ehca_main.c      |  94
 drivers/infiniband/hw/ehca/ehca_qp.c        |  17
 drivers/infiniband/hw/ehca/hcp_if.c         |  13
 drivers/infiniband/hw/ipath/ipath_iba6120.c |   7
 drivers/infiniband/hw/ipath/ipath_intr.c    |   7
 drivers/infiniband/hw/ipath/ipath_kernel.h  |   2
 drivers/infiniband/hw/ipath/ipath_verbs.c   |  12
 drivers/infiniband/hw/mlx4/main.c           |   1
 drivers/infiniband/hw/mthca/mthca_cq.c      |   4
 drivers/infiniband/hw/mthca/mthca_qp.c      |   1
 drivers/infiniband/ulp/ipoib/ipoib_cm.c     |  11
 drivers/net/Kconfig                         |   1
 drivers/net/mlx4/main.c                     |   2
 drivers/net/mlx4/mlx4.h                     |   1
 17 files changed, 154 insertions(+), 133 deletions(-)
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index fde92ce45153..2eb52b7a71da 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -346,12 +346,33 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 		complete(&id_priv->comp);
 }
 
-static void cma_release_remove(struct rdma_id_private *id_priv)
+static int cma_disable_remove(struct rdma_id_private *id_priv,
+			      enum cma_state state)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == state) {
+		atomic_inc(&id_priv->dev_remove);
+		ret = 0;
+	} else
+		ret = -EINVAL;
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+
+static void cma_enable_remove(struct rdma_id_private *id_priv)
 {
 	if (atomic_dec_and_test(&id_priv->dev_remove))
 		wake_up(&id_priv->wait_remove);
 }
 
+static int cma_has_cm_dev(struct rdma_id_private *id_priv)
+{
+	return (id_priv->id.device && id_priv->cm_id.ib);
+}
+
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 				  void *context, enum rdma_port_space ps)
 {
@@ -884,9 +905,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	struct rdma_cm_event event;
 	int ret = 0;
 
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
 	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
@@ -942,12 +962,12 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1057,11 +1077,8 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
@@ -1101,11 +1118,11 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 
 release_conn_id:
 	cma_exch(conn_id, CMA_DESTROYING);
-	cma_release_remove(conn_id);
+	cma_enable_remove(conn_id);
 	rdma_destroy_id(&conn_id->id);
 
 out:
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1171,9 +1188,10 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (iw_event->event) {
 	case IW_CM_EVENT_CLOSE:
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
@@ -1214,12 +1232,12 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -1234,11 +1252,8 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	int ret;
 
 	listen_id = cm_id->context;
-	atomic_inc(&listen_id->dev_remove);
-	if (!cma_comp(listen_id, CMA_LISTEN)) {
-		ret = -ECONNABORTED;
-		goto out;
-	}
+	if (cma_disable_remove(listen_id, CMA_LISTEN))
+		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
@@ -1255,13 +1270,13 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	dev = ip_dev_find(iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
 		ret = -EADDRNOTAVAIL;
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
 	ret = rdma_copy_addr(&conn_id->id.route.addr.dev_addr, dev, NULL);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1270,7 +1285,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	ret = cma_acquire_dev(conn_id);
 	mutex_unlock(&lock);
 	if (ret) {
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(new_cm_id);
 		goto out;
 	}
@@ -1293,14 +1308,14 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
 		cma_exch(conn_id, CMA_DESTROYING);
-		cma_release_remove(conn_id);
+		cma_enable_remove(conn_id);
 		rdma_destroy_id(&conn_id->id);
 	}
 
 out:
 	if (dev)
 		dev_put(dev);
-	cma_release_remove(listen_id);
+	cma_enable_remove(listen_id);
 	return ret;
 }
 
@@ -1519,7 +1534,7 @@ static void cma_work_handler(struct work_struct *_work)
 		destroy = 1;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 	if (destroy)
 		rdma_destroy_id(&id_priv->id);
@@ -1711,13 +1726,13 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	cma_deref_id(id_priv);
 }
 
@@ -2042,11 +2057,10 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	memset(&event, 0, sizeof event);
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_CONNECT))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_CONNECT))
+		return 0;
 
+	memset(&event, 0, sizeof event);
 	switch (ib_event->event) {
 	case IB_CM_SIDR_REQ_ERROR:
 		event.event = RDMA_CM_EVENT_UNREACHABLE;
@@ -2084,12 +2098,12 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
 	}
 out:
-	cma_release_remove(id_priv);
+	cma_enable_remove(id_priv);
 	return ret;
 }
 
@@ -2413,7 +2427,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (id->device->node_type) {
@@ -2435,7 +2449,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2466,8 +2480,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT) &&
-	    !cma_comp(id_priv, CMA_DISCONNECT))
+	if (!cma_has_cm_dev(id_priv))
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2499,10 +2512,9 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	atomic_inc(&id_priv->dev_remove);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
-		goto out;
+	if (cma_disable_remove(id_priv, CMA_ADDR_BOUND) &&
+	    cma_disable_remove(id_priv, CMA_ADDR_RESOLVED))
+		return 0;
 
 	if (!status && id_priv->id.qp)
 		status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
@@ -2524,12 +2536,12 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
 		cma_exch(id_priv, CMA_DESTROYING);
-		cma_release_remove(id_priv);
+		cma_enable_remove(id_priv);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
 	}
-out:
-	cma_release_remove(id_priv);
+
+	cma_enable_remove(id_priv);
 	return 0;
 }
 
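The cma.c change above replaces the old pattern of unconditionally bumping dev_remove and only then checking the id's state with cma_disable_remove(), which performs the state test and the increment as one step under the id's lock, so an event handler can no longer pin an id whose device is already being removed. Below is a minimal userspace sketch of that pattern, assuming simplified types: the kernel uses an atomic counter, a wait queue and spin_lock_irqsave(), while the sketch collapses them into one pthread mutex and condition variable, and the names (ctx, ctx_disable_remove, ctx_enable_remove, ctx_remove_dev) are illustrative, not the RDMA CM API.

#include <pthread.h>
#include <stdio.h>

enum ctx_state { CTX_CONNECT, CTX_DESTROYING };

struct ctx {
	pthread_mutex_t lock;
	pthread_cond_t  wait_remove;
	enum ctx_state  state;
	int             dev_remove;   /* handlers currently blocking removal */
};

/* Like cma_disable_remove(): block removal only if still in 'state'. */
static int ctx_disable_remove(struct ctx *c, enum ctx_state state)
{
	int ret = -1;

	pthread_mutex_lock(&c->lock);
	if (c->state == state) {
		c->dev_remove++;
		ret = 0;
	}
	pthread_mutex_unlock(&c->lock);
	return ret;
}

/* Like cma_enable_remove(): drop the block, wake the removal path. */
static void ctx_enable_remove(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->dev_remove == 0)
		pthread_cond_broadcast(&c->wait_remove);
	pthread_mutex_unlock(&c->lock);
}

/* Device-removal side: flip the state, then wait for handlers to drain. */
static void ctx_remove_dev(struct ctx *c)
{
	pthread_mutex_lock(&c->lock);
	c->state = CTX_DESTROYING;
	while (c->dev_remove > 0)
		pthread_cond_wait(&c->wait_remove, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct ctx c = {
		.lock        = PTHREAD_MUTEX_INITIALIZER,
		.wait_remove = PTHREAD_COND_INITIALIZER,
		.state       = CTX_CONNECT,
	};

	if (ctx_disable_remove(&c, CTX_CONNECT) == 0) {
		printf("event handler runs with removal blocked\n");
		ctx_enable_remove(&c);
	}
	ctx_remove_dev(&c);
	printf("device removal completed\n");
	return 0;
}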
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index f64d42b08674..1d286d3cc2d5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -277,6 +277,7 @@ void ehca_cleanup_mrmw_cache(void);
 
 extern spinlock_t ehca_qp_idr_lock;
 extern spinlock_t ehca_cq_idr_lock;
+extern spinlock_t hcall_lock;
 extern struct idr ehca_qp_idr;
 extern struct idr ehca_cq_idr;
 
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 82dda2faf4d0..100329ba3343 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -517,12 +517,11 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
 			else {
 				struct ehca_cq *cq = eq->eqe_cache[i].cq;
 				comp_event_callback(cq);
-				spin_lock_irqsave(&ehca_cq_idr_lock, flags);
+				spin_lock(&ehca_cq_idr_lock);
 				cq->nr_events--;
 				if (!cq->nr_events)
 					wake_up(&cq->wait_completion);
-				spin_unlock_irqrestore(&ehca_cq_idr_lock,
-						       flags);
+				spin_unlock(&ehca_cq_idr_lock);
 			}
 		} else {
 			ehca_dbg(&shca->ib_device, "Got non completion event");
@@ -711,6 +710,7 @@ static void destroy_comp_task(struct ehca_comp_pool *pool,
 	kthread_stop(task);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
 static void take_over_work(struct ehca_comp_pool *pool,
 			   int cpu)
 {
@@ -735,7 +735,6 @@ static void take_over_work(struct ehca_comp_pool *pool,
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 static int comp_pool_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index fe90e7454560..c3f99f33b49c 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
 MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
-MODULE_VERSION("SVNEHCA_0022");
+MODULE_VERSION("SVNEHCA_0023");
 
 int ehca_open_aqp1 = 0;
 int ehca_debug_level = 0;
@@ -62,7 +62,7 @@ int ehca_use_hp_mr = 0;
 int ehca_port_act_time = 30;
 int ehca_poll_all_eqs = 1;
 int ehca_static_rate = -1;
-int ehca_scaling_code = 1;
+int ehca_scaling_code = 0;
 
 module_param_named(open_aqp1, ehca_open_aqp1, int, 0);
 module_param_named(debug_level, ehca_debug_level, int, 0);
@@ -98,6 +98,7 @@ MODULE_PARM_DESC(scaling_code,
 
 spinlock_t ehca_qp_idr_lock;
 spinlock_t ehca_cq_idr_lock;
+spinlock_t hcall_lock;
 DEFINE_IDR(ehca_qp_idr);
 DEFINE_IDR(ehca_cq_idr);
 
@@ -453,15 +454,14 @@ static ssize_t ehca_store_debug_level(struct device_driver *ddp,
 DRIVER_ATTR(debug_level, S_IRUSR | S_IWUSR,
 	    ehca_show_debug_level, ehca_store_debug_level);
 
-void ehca_create_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_create_file(&drv->driver, &driver_attr_debug_level);
-}
-
-void ehca_remove_driver_sysfs(struct ibmebus_driver *drv)
-{
-	driver_remove_file(&drv->driver, &driver_attr_debug_level);
-}
+static struct attribute *ehca_drv_attrs[] = {
+	&driver_attr_debug_level.attr,
+	NULL
+};
+
+static struct attribute_group ehca_drv_attr_grp = {
+	.attrs = ehca_drv_attrs
+};
 
 #define EHCA_RESOURCE_ATTR(name) \
 static ssize_t ehca_show_##name(struct device *dev, \
@@ -523,44 +523,28 @@ static ssize_t ehca_show_adapter_handle(struct device *dev,
 }
 static DEVICE_ATTR(adapter_handle, S_IRUGO, ehca_show_adapter_handle, NULL);
 
+static struct attribute *ehca_dev_attrs[] = {
+	&dev_attr_adapter_handle.attr,
+	&dev_attr_num_ports.attr,
+	&dev_attr_hw_ver.attr,
+	&dev_attr_max_eq.attr,
+	&dev_attr_cur_eq.attr,
+	&dev_attr_max_cq.attr,
+	&dev_attr_cur_cq.attr,
+	&dev_attr_max_qp.attr,
+	&dev_attr_cur_qp.attr,
+	&dev_attr_max_mr.attr,
+	&dev_attr_cur_mr.attr,
+	&dev_attr_max_mw.attr,
+	&dev_attr_cur_mw.attr,
+	&dev_attr_max_pd.attr,
+	&dev_attr_max_ah.attr,
+	NULL
+};
 
-void ehca_create_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_create_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_create_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_create_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_create_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
-
-void ehca_remove_device_sysfs(struct ibmebus_dev *dev)
-{
-	device_remove_file(&dev->ofdev.dev, &dev_attr_adapter_handle);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_num_ports);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_hw_ver);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_eq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_cq);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_qp);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mr);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_cur_mw);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_pd);
-	device_remove_file(&dev->ofdev.dev, &dev_attr_max_ah);
-}
+static struct attribute_group ehca_dev_attr_grp = {
+	.attrs = ehca_dev_attrs
+};
 
 static int __devinit ehca_probe(struct ibmebus_dev *dev,
 				const struct of_device_id *id)
@@ -668,7 +652,10 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
 		}
 	}
 
-	ehca_create_device_sysfs(dev);
+	ret = sysfs_create_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_err(&shca->ib_device,
+			 "Cannot create device attributes ret=%d", ret);
 
 	spin_lock(&shca_list_lock);
 	list_add(&shca->shca_list, &shca_list);
@@ -720,7 +707,7 @@ static int __devexit ehca_remove(struct ibmebus_dev *dev)
 	struct ehca_shca *shca = dev->ofdev.dev.driver_data;
 	int ret;
 
-	ehca_remove_device_sysfs(dev);
+	sysfs_remove_group(&dev->ofdev.dev.kobj, &ehca_dev_attr_grp);
 
 	if (ehca_open_aqp1 == 1) {
 		int i;
@@ -812,11 +799,12 @@ int __init ehca_module_init(void)
 	int ret;
 
 	printk(KERN_INFO "eHCA Infiniband Device Driver "
-	       "(Rel.: SVNEHCA_0022)\n");
+	       "(Rel.: SVNEHCA_0023)\n");
 	idr_init(&ehca_qp_idr);
 	idr_init(&ehca_cq_idr);
 	spin_lock_init(&ehca_qp_idr_lock);
 	spin_lock_init(&ehca_cq_idr_lock);
+	spin_lock_init(&hcall_lock);
 
 	INIT_LIST_HEAD(&shca_list);
 	spin_lock_init(&shca_list_lock);
@@ -838,7 +826,9 @@ int __init ehca_module_init(void)
 		goto module_init2;
 	}
 
-	ehca_create_driver_sysfs(&ehca_driver);
+	ret = sysfs_create_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
+	if (ret) /* only complain; we can live without attributes */
+		ehca_gen_err("Cannot create driver attributes ret=%d", ret);
 
 	if (ehca_poll_all_eqs != 1) {
 		ehca_gen_err("WARNING!!!");
@@ -865,7 +855,7 @@ void __exit ehca_module_exit(void)
 	if (ehca_poll_all_eqs == 1)
 		del_timer_sync(&poll_eqs_timer);
 
-	ehca_remove_driver_sysfs(&ehca_driver);
+	sysfs_remove_group(&ehca_driver.driver.kobj, &ehca_drv_attr_grp);
 	ibmebus_unregister_driver(&ehca_driver);
 
 	ehca_destroy_slab_caches();
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index df0516f24379..b5bc787c77b6 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -523,6 +523,8 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		goto create_qp_exit1;
 	}
 
+	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
+
 	switch (init_attr->qp_type) {
 	case IB_QPT_RC:
 		if (isdaqp == 0) {
@@ -568,7 +570,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
 			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
 			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
-			my_qp->real_qp_num =
+			my_qp->ib_qp.qp_num =
 				(init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
 		}
 
@@ -595,7 +597,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	my_qp->ib_qp.recv_cq = init_attr->recv_cq;
 	my_qp->ib_qp.send_cq = init_attr->send_cq;
 
-	my_qp->ib_qp.qp_num = my_qp->real_qp_num;
 	my_qp->ib_qp.qp_type = init_attr->qp_type;
 
 	my_qp->qp_type = init_attr->qp_type;
@@ -968,17 +969,21 @@ static int internal_modify_qp(struct ib_qp *ibqp,
 			((ehca_mult - 1) / ah_mult) : 0;
 	else
 		mqpcb->max_static_rate = 0;
-
 	update_mask |= EHCA_BMASK_SET(MQPCB_MASK_MAX_STATIC_RATE, 1);
 
 	/*
+	 * Always supply the GRH flag, even if it's zero, to give the
+	 * hypervisor a clear "yes" or "no" instead of a "perhaps"
+	 */
+	update_mask |= EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+
+	/*
 	 * only if GRH is TRUE we might consider SOURCE_GID_IDX
 	 * and DEST_GID otherwise phype will return H_ATTR_PARM!!!
 	 */
 	if (attr->ah_attr.ah_flags == IB_AH_GRH) {
-		mqpcb->send_grh_flag = 1 << 31;
-		update_mask |=
-			EHCA_BMASK_SET(MQPCB_MASK_SEND_GRH_FLAG, 1);
+		mqpcb->send_grh_flag = 1;
+
 		mqpcb->source_gid_idx = attr->ah_attr.grh.sgid_index;
 		update_mask |=
 			EHCA_BMASK_SET(MQPCB_MASK_SOURCE_GID_IDX, 1);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index b564fcd3b282..7f0beec74f70 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -154,7 +154,8 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			      unsigned long arg9)
 {
 	long ret;
-	int i, sleep_msecs;
+	int i, sleep_msecs, lock_is_set = 0;
+	unsigned long flags;
 
 	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
 		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -162,10 +163,18 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 		     arg8, arg9);
 
 	for (i = 0; i < 5; i++) {
+		if ((opcode == H_ALLOC_RESOURCE) && (arg2 == 5)) {
+			spin_lock_irqsave(&hcall_lock, flags);
+			lock_is_set = 1;
+		}
+
 		ret = plpar_hcall9(opcode, outs,
 				   arg1, arg2, arg3, arg4, arg5,
 				   arg6, arg7, arg8, arg9);
 
+		if (lock_is_set)
+			spin_unlock_irqrestore(&hcall_lock, flags);
+
 		if (H_IS_LONG_BUSY(ret)) {
 			sleep_msecs = get_longbusy_msecs(ret);
 			msleep_interruptible(sleep_msecs);
@@ -193,11 +202,11 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 			     opcode, ret, outs[0], outs[1], outs[2], outs[3],
 			     outs[4], outs[5], outs[6], outs[7], outs[8]);
 		return ret;
-
 	}
 
 	return H_BUSY;
 }
+
 u64 hipz_h_alloc_resource_eq(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_pfeq *pfeq,
 			     const u32 neq_control,
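The hcp_if.c hunks serialize just the hypervisor call that needs it (resource allocation, recognized here by opcode == H_ALLOC_RESOURCE and arg2 == 5), taking hcall_lock only around that call and leaving every other hcall unserialized. A hedged userspace sketch of that selective-serialization shape follows; the opcode values and fake_hcall() are stand-ins, not the real PAPR interface.

#include <pthread.h>
#include <stdio.h>

#define H_ALLOC_RESOURCE 1             /* illustrative opcode values only */
#define H_QUERY_HCA      2

static pthread_mutex_t hcall_lock = PTHREAD_MUTEX_INITIALIZER;

static long fake_hcall(int opcode, long arg)   /* stand-in for plpar_hcall9() */
{
	return opcode + arg;
}

/* Serialize only the call that is known to race inside the hypervisor. */
static long do_hcall(int opcode, long arg)
{
	int locked = 0;
	long ret;

	if (opcode == H_ALLOC_RESOURCE) {
		pthread_mutex_lock(&hcall_lock);
		locked = 1;
	}

	ret = fake_hcall(opcode, arg);

	if (locked)
		pthread_mutex_unlock(&hcall_lock);
	return ret;
}

int main(void)
{
	printf("%ld %ld\n", do_hcall(H_ALLOC_RESOURCE, 5), do_hcall(H_QUERY_HCA, 0));
	return 0;
}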
diff --git a/drivers/infiniband/hw/ipath/ipath_iba6120.c b/drivers/infiniband/hw/ipath/ipath_iba6120.c
index 1b9c30857754..4e2e3dfeb2c8 100644
--- a/drivers/infiniband/hw/ipath/ipath_iba6120.c
+++ b/drivers/infiniband/hw/ipath/ipath_iba6120.c
@@ -747,7 +747,6 @@ static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
 
 static int ipath_pe_intconfig(struct ipath_devdata *dd)
 {
-	u64 val;
 	u32 chiprev;
 
 	/*
@@ -760,9 +759,9 @@ static int ipath_pe_intconfig(struct ipath_devdata *dd)
 	if ((chiprev & INFINIPATH_R_CHIPREVMINOR_MASK) > 1) {
 		/* Rev2+ reports extra errors via internal GPIO pins */
 		dd->ipath_flags |= IPATH_GPIO_ERRINTRS;
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= IPATH_GPIO_ERRINTR_MASK;
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= IPATH_GPIO_ERRINTR_MASK;
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 	return 0;
 }
diff --git a/drivers/infiniband/hw/ipath/ipath_intr.c b/drivers/infiniband/hw/ipath/ipath_intr.c
index 45d033169c6e..a90d3b5699c4 100644
--- a/drivers/infiniband/hw/ipath/ipath_intr.c
+++ b/drivers/infiniband/hw/ipath/ipath_intr.c
@@ -1056,7 +1056,7 @@ irqreturn_t ipath_intr(int irq, void *data)
 			gpiostatus &= ~(1 << IPATH_GPIO_PORT0_BIT);
 			chk0rcv = 1;
 		}
-		if (unlikely(gpiostatus)) {
+		if (gpiostatus) {
 			/*
 			 * Some unexpected bits remain. If they could have
 			 * caused the interrupt, complain and clear.
@@ -1065,9 +1065,8 @@ irqreturn_t ipath_intr(int irq, void *data)
 			 * GPIO interrupts, possibly on a "three strikes"
 			 * basis.
 			 */
-			u32 mask;
-			mask = ipath_read_kreg32(
-				dd, dd->ipath_kregs->kr_gpio_mask);
+			const u32 mask = (u32) dd->ipath_gpio_mask;
+
 			if (mask & gpiostatus) {
 				ipath_dbg("Unexpected GPIO IRQ bits %x\n",
 					  gpiostatus & mask);
diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h
index e900c2593f44..12194f3dd8cc 100644
--- a/drivers/infiniband/hw/ipath/ipath_kernel.h
+++ b/drivers/infiniband/hw/ipath/ipath_kernel.h
@@ -397,6 +397,8 @@ struct ipath_devdata {
 	unsigned long ipath_pioavailshadow[8];
 	/* shadow of kr_gpio_out, for rmw ops */
 	u64 ipath_gpio_out;
+	/* shadow the gpio mask register */
+	u64 ipath_gpio_mask;
 	/* kr_revision shadow */
 	u64 ipath_revision;
 	/*
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 12933e77c7e9..bb70845279b8 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -1387,13 +1387,12 @@ static int enable_timer(struct ipath_devdata *dd)
 	 * processing.
 	 */
 	if (dd->ipath_flags & IPATH_GPIO_INTR) {
-		u64 val;
 		ipath_write_kreg(dd, dd->ipath_kregs->kr_debugportselect,
 				 0x2074076542310ULL);
 		/* Enable GPIO bit 2 interrupt */
-		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask |= (u64) (1 << IPATH_GPIO_PORT0_BIT);
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 	}
 
 	init_timer(&dd->verbs_timer);
@@ -1412,8 +1411,9 @@ static int disable_timer(struct ipath_devdata *dd)
 		u64 val;
 		/* Disable GPIO bit 2 interrupt */
 		val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_gpio_mask);
-		val &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
-		ipath_write_kreg( dd, dd->ipath_kregs->kr_gpio_mask, val);
+		dd->ipath_gpio_mask &= ~((u64) (1 << IPATH_GPIO_PORT0_BIT));
+		ipath_write_kreg(dd, dd->ipath_kregs->kr_gpio_mask,
+				 dd->ipath_gpio_mask);
 		/*
 		 * We might want to undo changes to debugportselect,
 		 * but how?
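The three ipath hunks above switch from read-modify-write of the GPIO mask register to a software shadow: the driver keeps the current mask in dd->ipath_gpio_mask, updates the shadow, and only ever writes it out, so the interrupt path can consult the shadow without a register read. A small sketch of that shadow-register idiom, assuming made-up register and mask values, is shown below; write_reg() stands in for ipath_write_kreg().

#include <stdint.h>
#include <stdio.h>

#define GPIO_PORT0_BIT     2
#define GPIO_ERRINTR_MASK  0xe0ULL     /* illustrative value, not the chip's */

static uint64_t hw_gpio_mask;          /* pretend device register */
static uint64_t gpio_mask_shadow;      /* software copy, like dd->ipath_gpio_mask */

static void write_reg(uint64_t val)    /* stand-in for ipath_write_kreg() */
{
	hw_gpio_mask = val;
}

static void enable_port0_irq(void)     /* like the enable_timer() hunk */
{
	gpio_mask_shadow |= 1ULL << GPIO_PORT0_BIT;
	write_reg(gpio_mask_shadow);
}

static void enable_err_irqs(void)      /* like the ipath_pe_intconfig() hunk */
{
	gpio_mask_shadow |= GPIO_ERRINTR_MASK;
	write_reg(gpio_mask_shadow);
}

static void disable_port0_irq(void)    /* like the disable_timer() hunk */
{
	gpio_mask_shadow &= ~(1ULL << GPIO_PORT0_BIT);
	write_reg(gpio_mask_shadow);
}

int main(void)
{
	enable_port0_irq();
	enable_err_irqs();
	disable_port0_irq();
	printf("hw mask = %#llx\n", (unsigned long long)hw_gpio_mask);
	return 0;
}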
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 688ecb4c39f3..402f3a20ec0a 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -489,6 +489,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
 	if (!ibdev->uar_map)
 		goto err_uar;
+	MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);
 
 	INIT_LIST_HEAD(&ibdev->pgdir_list);
 	mutex_init(&ibdev->pgdir_mutex);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index cf0868f6e965..ca224d018af2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -284,7 +284,7 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 {
 	struct mthca_cqe *cqe;
 	u32 prod_index;
-	int nfreed = 0;
+	int i, nfreed = 0;
 
 	spin_lock_irq(&cq->lock);
 
@@ -321,6 +321,8 @@ void mthca_cq_clean(struct mthca_dev *dev, struct mthca_cq *cq, u32 qpn,
 	}
 
 	if (nfreed) {
+		for (i = 0; i < nfreed; ++i)
+			set_cqe_hw(get_cqe(cq, (cq->cons_index + i) & cq->ibcq.cqe));
 		wmb();
 		cq->cons_index += nfreed;
 		update_cons_index(dev, cq, nfreed);
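The mthca_cq.c hunk addresses CQ cleaning: once completions belonging to a destroyed QP have been squeezed out, the now-unused slots directly behind the consumer index are still marked as software-owned, and the added loop hands each of them back to hardware before the consumer index is advanced. The toy ring below sketches that ownership rule under simplified assumptions; the hw_owned flag and finish_clean() are illustrative, not the mthca CQE layout.

#include <stdio.h>

#define CQ_SIZE 8                        /* toy ring, power of two */

struct cqe {
	int hw_owned;                    /* 1: hardware may reuse this slot */
};

static struct cqe ring[CQ_SIZE];
static unsigned int cons_index;

static struct cqe *get_cqe(unsigned int idx)
{
	return &ring[idx & (CQ_SIZE - 1)];
}

/*
 * After nfreed entries have been squeezed out during cleaning, the slots
 * directly behind the consumer index are dead from software's point of
 * view but still marked software-owned; flip them back to hardware
 * before publishing the new consumer index.
 */
static void finish_clean(int nfreed)
{
	int i;

	if (!nfreed)
		return;
	for (i = 0; i < nfreed; ++i)
		get_cqe(cons_index + i)->hw_owned = 1;
	/* a wmb() would go here before telling the HW about cons_index */
	cons_index += nfreed;
}

int main(void)
{
	finish_clean(3);
	printf("cons_index=%u slot0 hw_owned=%d\n", cons_index, ring[0].hw_owned);
	return 0;
}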
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index fee60c852d14..72fabb822f1c 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1862,6 +1862,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 				    dev->kar + MTHCA_RECEIVE_DOORBELL,
 				    MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
 
+			qp->rq.next_ind = ind;
 			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
 			size0 = 0;
 		}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 785bc8505f2a..eec833b81e9b 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -257,10 +257,11 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
 	cm_id->context = p;
 	p->jiffies = jiffies;
 	spin_lock_irq(&priv->lock);
+	if (list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	list_add(&p->list, &priv->cm.passive_ids);
 	spin_unlock_irq(&priv->lock);
-	queue_delayed_work(ipoib_workqueue,
-			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	return 0;
 
 err_rep:
@@ -378,8 +379,6 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		if (!list_empty(&p->list))
 			list_move(&p->list, &priv->cm.passive_ids);
 		spin_unlock_irqrestore(&priv->lock, flags);
-		queue_delayed_work(ipoib_workqueue,
-				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	}
 }
 
@@ -1100,6 +1099,10 @@ static void ipoib_cm_stale_task(struct work_struct *work)
 		kfree(p);
 		spin_lock_irq(&priv->lock);
 	}
+
+	if (!list_empty(&priv->cm.passive_ids))
+		queue_delayed_work(ipoib_workqueue,
+				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
 	spin_unlock_irq(&priv->lock);
 }
 
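The ipoib_cm.c hunks change when the stale-connection scan is armed: instead of queueing the delayed work on every incoming request and every received packet, it is queued only when the passive list goes from empty to non-empty, and the scan re-arms itself while entries remain. The sketch below shows that re-arm policy in plain C, with queue_scan() standing in for queue_delayed_work() and a counter standing in for the list; it is a hedged illustration, not the driver's code.

#include <stdbool.h>
#include <stdio.h>

static int  passive_count;   /* stand-in for the passive_ids list length */
static bool scan_pending;    /* stand-in for the queued delayed work */

static void queue_scan(void)              /* like queue_delayed_work() */
{
	scan_pending = true;
}

static void add_passive_conn(void)        /* like ipoib_cm_req_handler() */
{
	if (passive_count == 0)           /* arm only on empty -> non-empty */
		queue_scan();
	passive_count++;
}

static void stale_scan(void)              /* like ipoib_cm_stale_task() */
{
	scan_pending = false;
	if (passive_count > 0)
		passive_count--;          /* pretend one connection aged out */
	if (passive_count > 0)            /* keep the scan alive only while needed */
		queue_scan();
}

int main(void)
{
	add_passive_conn();
	add_passive_conn();               /* no second arm: a scan is already pending */
	stale_scan();                     /* one entry left, so it re-arms itself */
	stale_scan();                     /* list now empty, scan stays idle */
	printf("connections=%d scan_pending=%d\n", passive_count, scan_pending);
	return 0;
}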
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fb99cd445504..c5baa197bc08 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2508,6 +2508,7 @@ config MLX4_CORE
 
 config MLX4_DEBUG
 	bool "Verbose debugging output" if (MLX4_CORE && EMBEDDED)
+	depends on MLX4_CORE
 	default y
 	---help---
 	  This option causes debugging code to be compiled into the
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 4debb024eaf9..20b8c0d3ced4 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -542,8 +542,6 @@ static int __devinit mlx4_setup_hca(struct mlx4_dev *dev)
 	struct mlx4_priv *priv = mlx4_priv(dev);
 	int err;
 
-	MLX4_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
-
 	err = mlx4_init_uar_table(dev);
 	if (err) {
 		mlx4_err(dev, "Failed to initialize "
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index 9befbae3d196..3d3b6d24d8d3 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -275,7 +275,6 @@ struct mlx4_priv {
 
 	struct mlx4_uar driver_uar;
 	void __iomem *kar;
-	MLX4_DECLARE_DOORBELL_LOCK(doorbell_lock)
 
 	u32 rev_id;
 	char board_id[MLX4_BOARD_ID_LEN];