author		Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 17:50:12 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-22 17:50:12 -0400
commit		ece236ce2fad9c27a6fd2530f899289025194bce (patch)
tree		474b793205872206a2a3f7d409ff9b1f81f3a9a8
parent		441c196e84b11aad3123baa9320eee7abc6b5c98 (diff)
parent		4460207561290c3be7e6c7538f22690028170c1d (diff)

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (26 commits)
  IB/qib: Defer HCA error events to tasklet
  mlx4_core: Bump the driver version to 1.0
  RDMA/cxgb4: Use printk_ratelimited() instead of printk_ratelimit()
  IB/mlx4: Support PMA counters for IBoE
  IB/mlx4: Use flow counters on IBoE ports
  IB/pma: Add include file for IBA performance counters definitions
  mlx4_core: Add network flow counters
  mlx4_core: Fix location of counter index in QP context struct
  mlx4_core: Read extended capabilities into the flags field
  mlx4_core: Extend capability flags to 64 bits
  IB/mlx4: Generate GID change events in IBoE code
  IB/core: Add GID change event
  RDMA/cma: Don't allow IPoIB port space for IBoE
  RDMA: Allow for NULL .modify_device() and .modify_port() methods
  IB/qib: Update active link width
  IB/qib: Fix potential deadlock with link down interrupt
  IB/qib: Add sysfs interface to read free contexts
  IB/mthca: Remove unnecessary read of PCI_CAP_ID_EXP
  IB/qib: Remove double define
  IB/qib: Remove unnecessary read of PCI_CAP_ID_EXP
  ...
-rw-r--r--  MAINTAINERS                                   |  11
-rw-r--r--  drivers/infiniband/core/cache.c               |   3
-rw-r--r--  drivers/infiniband/core/cma.c                 |  84
-rw-r--r--  drivers/infiniband/core/device.c              |   6
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_provider.c  |   9
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c   |   8
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c        |   8
-rw-r--r--  drivers/infiniband/hw/cxgb4/resource.c        |   9
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_file_ops.c  |  11
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_mad.c       | 198
-rw-r--r--  drivers/infiniband/hw/mlx4/mad.c              |  68
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c             |  21
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h          |   1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c               |  10
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c       | 276
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.h       |  93
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c        |  15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c        |  43
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mad.c       |  15
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c      | 175
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mcg.c       | 101
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c   |  43
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c        |  35
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c  |  77
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c        |  49
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_reset.c     |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c       |  33
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c         |  11
-rw-r--r--  drivers/infiniband/hw/qib/qib.h               |   3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c      |  16
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7220.c       |  26
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c       |  72
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.c           |  78
-rw-r--r--  drivers/infiniband/hw/qib/qib_mad.h           | 143
-rw-r--r--  drivers/infiniband/hw/qib/qib_pcie.c          |   8
-rw-r--r--  drivers/infiniband/hw/qib/qib_sysfs.c         |  14
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c           |   2
-rw-r--r--  drivers/net/mlx4/en_ethtool.c                 |   9
-rw-r--r--  drivers/net/mlx4/en_main.c                    |   3
-rw-r--r--  drivers/net/mlx4/en_netdev.c                  |   5
-rw-r--r--  drivers/net/mlx4/en_port.c                    |   6
-rw-r--r--  drivers/net/mlx4/en_selftest.c                |   3
-rw-r--r--  drivers/net/mlx4/fw.c                         |  39
-rw-r--r--  drivers/net/mlx4/fw.h                         |   8
-rw-r--r--  drivers/net/mlx4/main.c                       |  58
-rw-r--r--  drivers/net/mlx4/mcg.c                        |  17
-rw-r--r--  drivers/net/mlx4/mlx4.h                       |   5
-rw-r--r--  drivers/net/mlx4/port.c                       |   8
-rw-r--r--  include/linux/mlx4/cmd.h                      |   3
-rw-r--r--  include/linux/mlx4/device.h                   |  60
-rw-r--r--  include/linux/mlx4/qp.h                       |   8
-rw-r--r--  include/rdma/ib_pma.h                         | 156
-rw-r--r--  include/rdma/ib_verbs.h                       |   3
53 files changed, 1006 insertions(+), 1162 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 81cf5fb615e5..41ec646d8a98 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3426,10 +3426,9 @@ S:	Maintained
 F:	drivers/net/ipg.*
 
 IPATH DRIVER
-M:	Ralph Campbell <infinipath@qlogic.com>
+M:	Mike Marciniszyn <infinipath@qlogic.com>
 L:	linux-rdma@vger.kernel.org
-T:	git git://git.qlogic.com/ipath-linux-2.6
-S:	Supported
+S:	Maintained
 F:	drivers/infiniband/hw/ipath/
 
 IPMI SUBSYSTEM
@@ -5152,6 +5151,12 @@ M:	Robert Jarzmik <robert.jarzmik@free.fr>
 L:	rtc-linux@googlegroups.com
 S:	Maintained
 
+QIB DRIVER
+M:	Mike Marciniszyn <infinipath@qlogic.com>
+L:	linux-rdma@vger.kernel.org
+S:	Supported
+F:	drivers/infiniband/hw/qib/
+
 QLOGIC QLA1280 SCSI DRIVER
 M:	Michael Reed <mdr@sgi.com>
 L:	linux-scsi@vger.kernel.org
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f9ba7d74dfc0..9353992f9eea 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -302,7 +302,8 @@ static void ib_cache_event(struct ib_event_handler *handler,
 	    event->event == IB_EVENT_LID_CHANGE    ||
 	    event->event == IB_EVENT_PKEY_CHANGE   ||
 	    event->event == IB_EVENT_SM_CHANGE     ||
-	    event->event == IB_EVENT_CLIENT_REREGISTER) {
+	    event->event == IB_EVENT_CLIENT_REREGISTER ||
+	    event->event == IB_EVENT_GID_CHANGE) {
 		work = kmalloc(sizeof *work, GFP_ATOMIC);
 		if (work) {
 			INIT_WORK(&work->work, ib_cache_task);
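The cache hunk above is the consumer side of the new event: once IB_EVENT_GID_CHANGE is in the trigger list, any driver that dispatches it gets its GID cache rebuilt. A minimal sketch of the producer side, assuming a driver with ibdev and port in scope (the mlx4 update_gids_task() hunk later in this diff follows the same shape):

	struct ib_event event;

	/* Notify ib_core consumers, including the GID cache above,
	 * that this port's GID table has changed. */
	event.device		= ibdev;
	event.element.port_num	= port;
	event.event		= IB_EVENT_GID_CHANGE;
	ib_dispatch_event(&event);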
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index b6a33b3c516d..ca4c5dcd7133 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -359,6 +359,10 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv)
 	enum rdma_link_layer dev_ll = dev_addr->dev_type == ARPHRD_INFINIBAND ?
 		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
 
+	if (dev_ll != IB_LINK_LAYER_INFINIBAND &&
+	    id_priv->id.ps == RDMA_PS_IPOIB)
+		return -EINVAL;
+
 	mutex_lock(&lock);
 	iboe_addr_get_sgid(dev_addr, &iboe_gid);
 	memcpy(&gid, dev_addr->src_dev_addr +
@@ -406,11 +410,6 @@ static int cma_disable_callback(struct rdma_id_private *id_priv,
 	return 0;
 }
 
-static int cma_has_cm_dev(struct rdma_id_private *id_priv)
-{
-	return (id_priv->id.device && id_priv->cm_id.ib);
-}
-
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 				  void *context, enum rdma_port_space ps,
 				  enum ib_qp_type qp_type)
@@ -920,11 +919,11 @@ void rdma_destroy_id(struct rdma_cm_id *id)
 	if (id_priv->cma_dev) {
 		switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 		case RDMA_TRANSPORT_IB:
-			if (id_priv->cm_id.ib && !IS_ERR(id_priv->cm_id.ib))
+			if (id_priv->cm_id.ib)
 				ib_destroy_cm_id(id_priv->cm_id.ib);
 			break;
 		case RDMA_TRANSPORT_IWARP:
-			if (id_priv->cm_id.iw && !IS_ERR(id_priv->cm_id.iw))
+			if (id_priv->cm_id.iw)
 				iw_destroy_cm_id(id_priv->cm_id.iw);
 			break;
 		default:
@@ -1085,12 +1084,12 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 
 	if (cma_get_net_info(ib_event->private_data, listen_id->ps,
 			     &ip_ver, &port, &src, &dst))
-		goto err;
+		return NULL;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
 			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
-		goto err;
+		return NULL;
 
 	cma_save_net_info(&id->route.addr, &listen_id->route.addr,
 			  ip_ver, port, src, dst);
@@ -1100,7 +1099,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,
 			       GFP_KERNEL);
 	if (!rt->path_rec)
-		goto destroy_id;
+		goto err;
 
 	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
 	if (rt->num_paths == 2)
@@ -1114,7 +1113,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
 					&rt->addr.dev_addr);
 		if (ret)
-			goto destroy_id;
+			goto err;
 	}
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
@@ -1122,9 +1121,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
-destroy_id:
-	rdma_destroy_id(id);
 err:
+	rdma_destroy_id(id);
 	return NULL;
 }
 
@@ -1468,13 +1466,15 @@ static int cma_ib_listen(struct rdma_id_private *id_priv)
 {
 	struct ib_cm_compare_data compare_data;
 	struct sockaddr *addr;
+	struct ib_cm_id	*id;
 	__be64 svc_id;
 	int ret;
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_req_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.ib))
-		return PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_req_handler, id_priv);
+	if (IS_ERR(id))
+		return PTR_ERR(id);
+
+	id_priv->cm_id.ib = id;
 
 	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
 	svc_id = cma_get_service_id(id_priv->id.ps, addr);
@@ -1497,12 +1497,15 @@ static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
 {
 	int ret;
 	struct sockaddr_in *sin;
+	struct iw_cm_id	*id;
+
+	id = iw_create_cm_id(id_priv->id.device,
+			     iw_conn_req_handler,
+			     id_priv);
+	if (IS_ERR(id))
+		return PTR_ERR(id);
 
-	id_priv->cm_id.iw = iw_create_cm_id(id_priv->id.device,
-					    iw_conn_req_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.iw))
-		return PTR_ERR(id_priv->cm_id.iw);
+	id_priv->cm_id.iw = id;
 
 	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
 	id_priv->cm_id.iw->local_addr = *sin;
@@ -2484,6 +2487,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 {
 	struct ib_cm_sidr_req_param req;
 	struct rdma_route *route;
+	struct ib_cm_id	*id;
 	int ret;
 
 	req.private_data_len = sizeof(struct cma_hdr) +
@@ -2501,12 +2505,13 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device,
-					    cma_sidr_rep_handler, id_priv);
-	if (IS_ERR(id_priv->cm_id.ib)) {
-		ret = PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
+			     id_priv);
+	if (IS_ERR(id)) {
+		ret = PTR_ERR(id);
 		goto out;
 	}
+	id_priv->cm_id.ib = id;
 
 	req.path = route->path_rec;
 	req.service_id = cma_get_service_id(id_priv->id.ps,
@@ -2530,6 +2535,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 	struct ib_cm_req_param req;
 	struct rdma_route *route;
 	void *private_data;
+	struct ib_cm_id	*id;
 	int offset, ret;
 
 	memset(&req, 0, sizeof req);
@@ -2543,12 +2549,12 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 		memcpy(private_data + offset, conn_param->private_data,
 		       conn_param->private_data_len);
 
-	id_priv->cm_id.ib = ib_create_cm_id(id_priv->id.device, cma_ib_handler,
-					    id_priv);
-	if (IS_ERR(id_priv->cm_id.ib)) {
-		ret = PTR_ERR(id_priv->cm_id.ib);
+	id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
+	if (IS_ERR(id)) {
+		ret = PTR_ERR(id);
 		goto out;
 	}
+	id_priv->cm_id.ib = id;
 
 	route = &id_priv->id.route;
 	ret = cma_format_hdr(private_data, id_priv->id.ps, route);
@@ -2577,8 +2583,8 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
 
 	ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
 out:
-	if (ret && !IS_ERR(id_priv->cm_id.ib)) {
-		ib_destroy_cm_id(id_priv->cm_id.ib);
+	if (ret && !IS_ERR(id)) {
+		ib_destroy_cm_id(id);
 		id_priv->cm_id.ib = NULL;
 	}
 
@@ -2595,10 +2601,8 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	struct iw_cm_conn_param iw_param;
 
 	cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
-	if (IS_ERR(cm_id)) {
-		ret = PTR_ERR(cm_id);
-		goto out;
-	}
+	if (IS_ERR(cm_id))
+		return PTR_ERR(cm_id);
 
 	id_priv->cm_id.iw = cm_id;
 
@@ -2622,7 +2626,7 @@ static int cma_connect_iw(struct rdma_id_private *id_priv,
 	iw_param.qpn = conn_param->qp_num;
 	ret = iw_cm_connect(cm_id, &iw_param);
 out:
-	if (ret && !IS_ERR(cm_id)) {
+	if (ret) {
 		iw_destroy_cm_id(cm_id);
 		id_priv->cm_id.iw = NULL;
 	}
@@ -2795,7 +2799,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (id->device->node_type) {
@@ -2817,7 +2821,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
@@ -2848,7 +2852,7 @@ int rdma_disconnect(struct rdma_cm_id *id)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_has_cm_dev(id_priv))
+	if (!id_priv->cm_id.ib)
 		return -EINVAL;
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
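A pattern repeated throughout the cma.c changes above: the CM id is created into a local variable, validated with IS_ERR()/PTR_ERR(), and only then published in id_priv, so id_priv->cm_id.ib / .iw is always either NULL or a valid pointer and readers such as rdma_destroy_id() no longer need IS_ERR() checks. A condensed sketch of the idiom, with hypothetical handler/context names:

	struct ib_cm_id *id;

	id = ib_create_cm_id(device, req_handler, context);
	if (IS_ERR(id))
		return PTR_ERR(id);	/* nothing was published */

	id_priv->cm_id.ib = id;		/* NULL or valid from here on */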
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 4007f721d25d..e711de400a01 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -627,6 +627,9 @@ int ib_modify_device(struct ib_device *device,
 		     int device_modify_mask,
 		     struct ib_device_modify *device_modify)
 {
+	if (!device->modify_device)
+		return -ENOSYS;
+
 	return device->modify_device(device, device_modify_mask,
 				     device_modify);
 }
@@ -647,6 +650,9 @@ int ib_modify_port(struct ib_device *device,
 		   u8 port_num, int port_modify_mask,
 		   struct ib_port_modify *port_modify)
 {
+	if (!device->modify_port)
+		return -ENOSYS;
+
 	if (port_num < start_port(device) || port_num > end_port(device))
 		return -EINVAL;
 
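These two guards are what let the provider hunks below delete their stub implementations: .modify_device and .modify_port become optional methods that a driver may leave NULL, with the core returning -ENOSYS on its behalf. The idiom, sketched for a hypothetical optional op:

	/* Optional method: drivers may leave the pointer NULL. */
	if (!device->some_optional_op)
		return -ENOSYS;

	return device->some_optional_op(device, arg);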
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index aeebc4d37e33..f101bb73be63 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -99,14 +99,6 @@ static int c2_query_port(struct ib_device *ibdev,
 	return 0;
 }
 
-static int c2_modify_port(struct ib_device *ibdev,
-			  u8 port, int port_modify_mask,
-			  struct ib_port_modify *props)
-{
-	pr_debug("%s:%u\n", __func__, __LINE__);
-	return 0;
-}
-
 static int c2_query_pkey(struct ib_device *ibdev,
 			 u8 port, u16 index, u16 * pkey)
 {
@@ -817,7 +809,6 @@ int c2_register_device(struct c2_dev *dev)
 	dev->ibdev.dma_device = &dev->pcidev->dev;
 	dev->ibdev.query_device = c2_query_device;
 	dev->ibdev.query_port = c2_query_port;
-	dev->ibdev.modify_port = c2_modify_port;
 	dev->ibdev.query_pkey = c2_query_pkey;
 	dev->ibdev.query_gid = c2_query_gid;
 	dev->ibdev.alloc_ucontext = c2_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 2e2741307af4..c7d9411f2954 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -61,13 +61,6 @@
 #include "iwch_user.h"
 #include "common.h"
 
-static int iwch_modify_port(struct ib_device *ibdev,
-			    u8 port, int port_modify_mask,
-			    struct ib_port_modify *props)
-{
-	return -ENOSYS;
-}
-
 static struct ib_ah *iwch_ah_create(struct ib_pd *pd,
 				    struct ib_ah_attr *ah_attr)
 {
@@ -1392,7 +1385,6 @@ int iwch_register_device(struct iwch_dev *dev)
 	dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
 	dev->ibdev.query_device = iwch_query_device;
 	dev->ibdev.query_port = iwch_query_port;
-	dev->ibdev.modify_port = iwch_modify_port;
 	dev->ibdev.query_pkey = iwch_query_pkey;
 	dev->ibdev.query_gid = iwch_query_gid;
 	dev->ibdev.alloc_ucontext = iwch_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 5b9e4220ca08..247fe706e7fa 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -58,13 +58,6 @@ static int fastreg_support = 1;
 module_param(fastreg_support, int, 0644);
 MODULE_PARM_DESC(fastreg_support, "Advertise fastreg support (default=1)");
 
-static int c4iw_modify_port(struct ib_device *ibdev,
-			    u8 port, int port_modify_mask,
-			    struct ib_port_modify *props)
-{
-	return -ENOSYS;
-}
-
 static struct ib_ah *c4iw_ah_create(struct ib_pd *pd,
 				    struct ib_ah_attr *ah_attr)
 {
@@ -456,7 +449,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
 	dev->ibdev.query_device = c4iw_query_device;
 	dev->ibdev.query_port = c4iw_query_port;
-	dev->ibdev.modify_port = c4iw_modify_port;
 	dev->ibdev.query_pkey = c4iw_query_pkey;
 	dev->ibdev.query_gid = c4iw_query_gid;
 	dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext;
diff --git a/drivers/infiniband/hw/cxgb4/resource.c b/drivers/infiniband/hw/cxgb4/resource.c
index 4fb50d58b493..407ff3924150 100644
--- a/drivers/infiniband/hw/cxgb4/resource.c
+++ b/drivers/infiniband/hw/cxgb4/resource.c
@@ -37,6 +37,7 @@
 #include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/genalloc.h>
+#include <linux/ratelimit.h>
 #include "iw_cxgb4.h"
 
 #define RANDOM_SIZE 16
@@ -311,8 +312,8 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
-	if (!addr && printk_ratelimit())
-		printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
-		       pci_name(rdev->lldi.pdev));
+	if (!addr)
+		printk_ratelimited(KERN_WARNING MOD "%s: Out of PBL memory\n",
+				   pci_name(rdev->lldi.pdev));
 	return (u32)addr;
 }
@@ -373,8 +374,8 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 {
 	unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
 	PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
-	if (!addr && printk_ratelimit())
-		printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
-		       pci_name(rdev->lldi.pdev));
+	if (!addr)
+		printk_ratelimited(KERN_WARNING MOD "%s: Out of RQT memory\n",
+				   pci_name(rdev->lldi.pdev));
 	return (u32)addr;
 }
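printk_ratelimited() comes from the newly included <linux/ratelimit.h> and keeps a separate rate-limit state per call site, whereas the old printk_ratelimit() test consulted one global state that any noisy caller could exhaust. A usage sketch under those assumptions, with a hypothetical message:

	/* Each printk_ratelimited() call site rate-limits independently. */
	if (!addr)
		printk_ratelimited(KERN_WARNING "%s: out of pool memory\n",
				   name);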
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index ee79a2d97b14..8697eca14356 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -40,6 +40,7 @@
 #include <linux/highmem.h>
 #include <linux/io.h>
 #include <linux/jiffies.h>
+#include <linux/cpu.h>
 #include <asm/pgtable.h>
 
 #include "ipath_kernel.h"
@@ -1684,17 +1685,19 @@ static int find_best_unit(struct file *fp,
 	 * information.  There may be some issues with dual core numbering
 	 * as well.  This needs more work prior to release.
 	 */
-	if (!cpumask_empty(&current->cpus_allowed) &&
-	    !cpumask_full(&current->cpus_allowed)) {
+	if (!cpumask_empty(tsk_cpus_allowed(current)) &&
+	    !cpumask_full(tsk_cpus_allowed(current))) {
 		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
-		for (i = 0; i < ncpus; i++)
-			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
+		get_online_cpus();
+		for_each_online_cpu(i)
+			if (cpumask_test_cpu(i, tsk_cpus_allowed(current))) {
 				ipath_cdbg(PROC, "%s[%u] affinity set for "
 					   "cpu %d/%d\n", current->comm,
 					   current->pid, i, ncpus);
 				curcpu = i;
 				nset++;
 			}
+		put_online_cpus();
 		if (curcpu != -1 && nset != ncpus) {
 			if (npresent) {
 				prefunit = curcpu / (ncpus / npresent);
diff --git a/drivers/infiniband/hw/ipath/ipath_mad.c b/drivers/infiniband/hw/ipath/ipath_mad.c
index ceb98ee78666..43f2d0424d4f 100644
--- a/drivers/infiniband/hw/ipath/ipath_mad.c
+++ b/drivers/infiniband/hw/ipath/ipath_mad.c
@@ -32,6 +32,7 @@
  */
 
 #include <rdma/ib_smi.h>
+#include <rdma/ib_pma.h>
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
@@ -789,151 +790,18 @@ static int recv_subn_set_pkeytable(struct ib_smp *smp,
 	return recv_subn_get_pkeytable(smp, ibdev);
 }
 
-#define IB_PMA_CLASS_PORT_INFO		cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL	cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT	cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS		cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT	cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT	cpu_to_be16(0x001E)
-
-struct ib_perf {
-	u8 base_version;
-	u8 mgmt_class;
-	u8 class_version;
-	u8 method;
-	__be16 status;
-	__be16 unused;
-	__be64 tid;
-	__be16 attr_id;
-	__be16 resv;
-	__be32 attr_mod;
-	u8 reserved[40];
-	u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
-	u8 base_version;
-	u8 class_version;
-	__be16 cap_mask;
-	u8 reserved[3];
-	u8 resp_time_value;		/* only lower 5 bits */
-	union ib_gid redirect_gid;
-	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 redirect_lid;
-	__be16 redirect_pkey;
-	__be32 redirect_qp;		/* only lower 24 bits */
-	__be32 redirect_qkey;
-	union ib_gid trap_gid;
-	__be32 trap_tc_sl_fl;		/* 8, 4, 20 bits respectively */
-	__be16 trap_lid;
-	__be16 trap_pkey;
-	__be32 trap_hl_qp;		/* 8, 24 bits respectively */
-	__be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
-	u8 opcode;
-	u8 port_select;
-	u8 tick;
-	u8 counter_width;		/* only lower 3 bits */
-	__be32 counter_mask0_9;		/* 2, 10 * 3, bits */
-	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
-	u8 sample_mechanisms;
-	u8 sample_status;		/* only lower 2 bits */
-	__be64 option_mask;
-	__be64 vendor_mask;
-	__be32 sample_start;
-	__be32 sample_interval;
-	__be16 tag;
-	__be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
-	__be16 tag;
-	__be16 sample_status;		/* only lower 2 bits */
-	__be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
-	__be16 tag;
-	__be16 sample_status;		/* only lower 2 bits */
-	__be32 extended_width;		/* only upper 2 bits */
-	__be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be16 symbol_error_counter;
-	u8 link_error_recovery_counter;
-	u8 link_downed_counter;
-	__be16 port_rcv_errors;
-	__be16 port_rcv_remphys_errors;
-	__be16 port_rcv_switch_relay_errors;
-	__be16 port_xmit_discards;
-	u8 port_xmit_constraint_errors;
-	u8 port_rcv_constraint_errors;
-	u8 reserved1;
-	u8 lli_ebor_errors;		/* 4, 4, bits */
-	__be16 reserved2;
-	__be16 vl15_dropped;
-	__be32 port_xmit_data;
-	__be32 port_rcv_data;
-	__be32 port_xmit_packets;
-	__be32 port_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
-
-struct ib_pma_portcounters_ext {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be32 reserved1;
-	__be64 port_xmit_data;
-	__be64 port_rcv_data;
-	__be64 port_xmit_packets;
-	__be64 port_rcv_packets;
-	__be64 port_unicast_xmit_packets;
-	__be64 port_unicast_rcv_packets;
-	__be64 port_multicast_xmit_packets;
-	__be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
-
-static int recv_pma_get_classportinfo(struct ib_perf *pmp)
+static int recv_pma_get_classportinfo(struct ib_pma_mad *pmp)
 {
-	struct ib_pma_classportinfo *p =
-		(struct ib_pma_classportinfo *)pmp->data;
+	struct ib_class_port_info *p =
+		(struct ib_class_port_info *)pmp->data;
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
-	if (pmp->attr_mod != 0)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	/* Indicate AllPortSelect is valid (only one port anyway) */
-	p->cap_mask = cpu_to_be16(1 << 8);
+	p->capability_mask = cpu_to_be16(1 << 8);
 	p->base_version = 1;
 	p->class_version = 1;
 	/*
@@ -957,7 +825,7 @@ static int recv_pma_get_classportinfo(struct ib_perf *pmp)
 	COUNTER_MASK(1, 3) | \
 	COUNTER_MASK(1, 4))
 
-static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 					   struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -970,9 +838,9 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 	/*
 	 * Ticks are 10x the link transfer period which for 2.5Gbs is 4
 	 * nsec.  0 == 4 nsec., 1 == 8 nsec., ..., 255 == 1020 nsec.  Sample
@@ -1006,7 +874,7 @@ static int recv_pma_get_portsamplescontrol(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int recv_pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 					   struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -1017,9 +885,9 @@ static int recv_pma_set_portsamplescontrol(struct ib_perf *pmp,
 	u8 status;
 	int ret;
 
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (p->port_select != port && p->port_select != 0xFF)) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
@@ -1093,7 +961,7 @@ static u64 get_counter(struct ipath_ibdev *dev,
 	return ret;
 }
 
-static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 					   struct ib_device *ibdev)
 {
 	struct ib_pma_portsamplesresult *p =
@@ -1118,7 +986,7 @@ static int recv_pma_get_portsamplesresult(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int recv_pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 					      struct ib_device *ibdev)
 {
 	struct ib_pma_portsamplesresult_ext *p =
@@ -1145,7 +1013,7 @@ static int recv_pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portcounters(struct ib_perf *pmp,
+static int recv_pma_get_portcounters(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1179,9 +1047,9 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1216,7 +1084,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1244,7 +1112,7 @@ static int recv_pma_get_portcounters(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_get_portcounters_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters_ext *p =
@@ -1265,9 +1133,9 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 ||
+	if (pmp->mad_hdr.attr_mod != 0 ||
 	    (port_select != port && port_select != 0xFF))
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	p->port_xmit_data = cpu_to_be64(swords);
 	p->port_rcv_data = cpu_to_be64(rwords);
@@ -1281,7 +1149,7 @@ static int recv_pma_get_portcounters_ext(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int recv_pma_set_portcounters(struct ib_perf *pmp,
+static int recv_pma_set_portcounters(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1344,7 +1212,7 @@ static int recv_pma_set_portcounters(struct ib_perf *pmp,
 	return recv_pma_get_portcounters(pmp, ibdev, port);
 }
 
-static int recv_pma_set_portcounters_ext(struct ib_perf *pmp,
+static int recv_pma_set_portcounters_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1518,19 +1386,19 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 			struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
-	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
 	int ret;
 
 	*out_mad = *in_mad;
-	if (pmp->class_version != 1) {
-		pmp->status |= IB_SMP_UNSUP_VERSION;
+	if (pmp->mad_hdr.class_version != 1) {
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
 
-	switch (pmp->method) {
+	switch (pmp->mad_hdr.method) {
 	case IB_MGMT_METHOD_GET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_CLASS_PORT_INFO:
 			ret = recv_pma_get_classportinfo(pmp);
 			goto bail;
@@ -1554,13 +1422,13 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 					       port_num);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
 
 	case IB_MGMT_METHOD_SET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_PORT_SAMPLES_CONTROL:
 			ret = recv_pma_set_portsamplescontrol(pmp, ibdev,
 							      port_num);
@@ -1574,7 +1442,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 					      port_num);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
@@ -1588,7 +1456,7 @@ static int process_perf(struct ib_device *ibdev, u8 port_num,
 		ret = IB_MAD_RESULT_SUCCESS;
 		goto bail;
 	default:
-		pmp->status |= IB_SMP_UNSUP_METHOD;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
 		ret = reply((struct ib_smp *) pmp);
 	}
 
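The net effect of the ipath_mad.c hunks: the driver-private ib_perf struct and PMA attribute IDs are gone in favor of the shared <rdma/ib_pma.h> definitions, and class-header fields are now reached through an embedded MAD header rather than flattened fields. A sketch of the resulting access pattern, assuming the shared layout (a mad_hdr followed by the 192-byte data area) and a hypothetical handler:

	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;

	/* Header fields live under mad_hdr... */
	if (pmp->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
		ret = handle_port_counters(pmp);	/* hypothetical */
	/* ...while attribute payloads are still overlaid on pmp->data. */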
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 57ffa50f509e..f36da994a85a 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -35,6 +35,7 @@
 
 #include <linux/mlx4/cmd.h>
 #include <linux/gfp.h>
+#include <rdma/ib_pma.h>
 
 #include "mlx4_ib.h"
 
@@ -232,7 +233,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma
 	}
 }
 
-int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 			struct ib_wc *in_wc, struct ib_grh *in_grh,
 			struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
@@ -302,6 +303,71 @@ int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
 }
 
+static void edit_counter(struct mlx4_counter *cnt,
+			 struct ib_pma_portcounters *pma_cnt)
+{
+	pma_cnt->port_xmit_data = cpu_to_be32((be64_to_cpu(cnt->tx_bytes) >> 2));
+	pma_cnt->port_rcv_data  = cpu_to_be32((be64_to_cpu(cnt->rx_bytes) >> 2));
+	pma_cnt->port_xmit_packets = cpu_to_be32(be64_to_cpu(cnt->tx_frames));
+	pma_cnt->port_rcv_packets  = cpu_to_be32(be64_to_cpu(cnt->rx_frames));
+}
+
+static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	struct mlx4_cmd_mailbox *mailbox;
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+	int err;
+	u32 inmod = dev->counters[port_num - 1] & 0xffff;
+	u8 mode;
+
+	if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
+		return -EINVAL;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
+	if (IS_ERR(mailbox))
+		return IB_MAD_RESULT_FAILURE;
+
+	err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
+			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+	if (err)
+		err = IB_MAD_RESULT_FAILURE;
+	else {
+		memset(out_mad->data, 0, sizeof out_mad->data);
+		mode = ((struct mlx4_counter *)mailbox->buf)->counter_mode;
+		switch (mode & 0xf) {
+		case 0:
+			edit_counter(mailbox->buf,
+				     (void *)(out_mad->data + 40));
+			err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+			break;
+		default:
+			err = IB_MAD_RESULT_FAILURE;
+		}
+	}
+
+	mlx4_free_cmd_mailbox(dev->dev, mailbox);
+
+	return err;
+}
+
+int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
+			struct ib_wc *in_wc, struct ib_grh *in_grh,
+			struct ib_mad *in_mad, struct ib_mad *out_mad)
+{
+	switch (rdma_port_get_link_layer(ibdev, port_num)) {
+	case IB_LINK_LAYER_INFINIBAND:
+		return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
+				      in_grh, in_mad, out_mad);
+	case IB_LINK_LAYER_ETHERNET:
+		return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
+					in_grh, in_mad, out_mad);
+	default:
+		return -EINVAL;
+	}
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *mad_send_wc)
 {
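Two details in the mad.c hunk are easy to miss: MAD processing is now dispatched on the port's link layer, so IBoE ports answer PMA queries from the hardware flow counters instead of the IB path, and edit_counter() shifts the byte counters right by two because the PMA PortXmitData/PortRcvData fields are defined in units of 32-bit words. The conversion in isolation:

	/* PMA data counters count 32-bit words, hence bytes >> 2;
	 * the wire format is big-endian 32-bit. */
	pma_cnt->port_xmit_data =
		cpu_to_be32(be64_to_cpu(cnt->tx_bytes) >> 2);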
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index fbe1973f77b0..fa643f4f4e28 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -816,7 +816,7 @@ static void update_gids_task(struct work_struct *work)
 	memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
 	event.device = &gw->dev->ib_dev;
 	event.element.port_num = gw->port;
-	event.event = IB_EVENT_LID_CHANGE;
+	event.event = IB_EVENT_GID_CHANGE;
 	ib_dispatch_event(&event);
 }
 
@@ -1098,11 +1098,21 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (init_node_data(ibdev))
 		goto err_map;
 
+	for (i = 0; i < ibdev->num_ports; ++i) {
+		if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
+						IB_LINK_LAYER_ETHERNET) {
+			err = mlx4_counter_alloc(ibdev->dev, &ibdev->counters[i]);
+			if (err)
+				ibdev->counters[i] = -1;
+		} else
+			ibdev->counters[i] = -1;
+	}
+
 	spin_lock_init(&ibdev->sm_lock);
 	mutex_init(&ibdev->cap_mask_mutex);
 
 	if (ib_register_device(&ibdev->ib_dev, NULL))
-		goto err_map;
+		goto err_counter;
 
 	if (mlx4_ib_mad_init(ibdev))
 		goto err_reg;
@@ -1132,6 +1142,10 @@ err_notif:
 err_reg:
 	ib_unregister_device(&ibdev->ib_dev);
 
+err_counter:
+	for (; i; --i)
+		mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
+
 err_map:
 	iounmap(ibdev->uar_map);
 
@@ -1160,7 +1174,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 		ibdev->iboe.nb.notifier_call = NULL;
 	}
 	iounmap(ibdev->uar_map);
-
+	for (p = 0; p < ibdev->num_ports; ++p)
+		mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
 	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index 2a322f21049f..e4bf2cff8662 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -193,6 +193,7 @@ struct mlx4_ib_dev {
 	struct mutex		cap_mask_mutex;
 	bool			ib_active;
 	struct mlx4_ib_iboe	iboe;
+	int			counters[MLX4_MAX_PORTS];
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2001f20a4361..3a91d9d8dc51 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -893,7 +893,6 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
 			--path->static_rate;
 	} else
 		path->static_rate = 0;
-	path->counter_index = 0xff;
 
 	if (ah->ah_flags & IB_AH_GRH) {
 		if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
@@ -1034,6 +1033,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
+	if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
+		if (dev->counters[qp->port - 1] != -1) {
+			context->pri_path.counter_index =
+						dev->counters[qp->port - 1];
+			optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
+		} else
+			context->pri_path.counter_index = 0xff;
+	}
+
 	if (attr_mask & IB_QP_PKEY_INDEX) {
 		context->pri_path.pkey_index = attr->pkey_index;
 		optpar |= MLX4_QP_OPTPAR_PKEY_INDEX;
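The qp.c hunk is where the counters allocated in main.c take effect: the per-port counter index is written into the QP context exactly once, on the INIT-to-RTR transition, and 0xff keeps the firmware default when no counter was allocated for the port. The decision, condensed into a sketch:

	/* Attach the port's flow counter (if any) as the QP enters RTR. */
	int idx = dev->counters[qp->port - 1];

	context->pri_path.counter_index = (idx != -1) ? idx : 0xff;
	if (idx != -1)
		optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;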
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 7bfa2a164955..3082b3b3d620 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -301,6 +301,38 @@ static int mthca_cmd_post(struct mthca_dev *dev,
 	return err;
 }
 
+
+static int mthca_status_to_errno(u8 status)
+{
+	static const int trans_table[] = {
+		[MTHCA_CMD_STAT_INTERNAL_ERR]   = -EIO,
+		[MTHCA_CMD_STAT_BAD_OP]         = -EPERM,
+		[MTHCA_CMD_STAT_BAD_PARAM]      = -EINVAL,
+		[MTHCA_CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
+		[MTHCA_CMD_STAT_BAD_RESOURCE]   = -EBADF,
+		[MTHCA_CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
+		[MTHCA_CMD_STAT_DDR_MEM_ERR]    = -ENOMEM,
+		[MTHCA_CMD_STAT_EXCEED_LIM]     = -ENOMEM,
+		[MTHCA_CMD_STAT_BAD_RES_STATE]  = -EBADF,
+		[MTHCA_CMD_STAT_BAD_INDEX]      = -EBADF,
+		[MTHCA_CMD_STAT_BAD_NVMEM]      = -EFAULT,
+		[MTHCA_CMD_STAT_BAD_QPEE_STATE] = -EINVAL,
+		[MTHCA_CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
+		[MTHCA_CMD_STAT_REG_BOUND]      = -EBUSY,
+		[MTHCA_CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
+		[MTHCA_CMD_STAT_BAD_PKT]        = -EBADMSG,
+		[MTHCA_CMD_STAT_BAD_SIZE]       = -ENOMEM,
+	};
+
+	if (status >= ARRAY_SIZE(trans_table) ||
+	    (status != MTHCA_CMD_STAT_OK
+	     && trans_table[status] == 0))
+		return -EINVAL;
+
+	return trans_table[status];
+}
+
+
 static int mthca_cmd_poll(struct mthca_dev *dev,
 			  u64 in_param,
 			  u64 *out_param,
@@ -308,11 +340,11 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
 			  u32 in_modifier,
 			  u8 op_modifier,
 			  u16 op,
-			  unsigned long timeout,
-			  u8 *status)
+			  unsigned long timeout)
 {
 	int err = 0;
 	unsigned long end;
+	u8 status;
 
 	down(&dev->cmd.poll_sem);
 
@@ -341,7 +373,12 @@ static int mthca_cmd_poll(struct mthca_dev *dev,
 			(u64) be32_to_cpu((__force __be32)
 					  __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4));
 
-	*status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+	status = be32_to_cpu((__force __be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24;
+	if (status) {
+		mthca_dbg(dev, "Command %02x completed with status %02x\n",
+			  op, status);
+		err = mthca_status_to_errno(status);
+	}
 
 out:
 	up(&dev->cmd.poll_sem);
@@ -374,8 +411,7 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
 			  u32 in_modifier,
 			  u8 op_modifier,
 			  u16 op,
-			  unsigned long timeout,
-			  u8 *status)
+			  unsigned long timeout)
 {
 	int err = 0;
 	struct mthca_cmd_context *context;
@@ -407,10 +443,11 @@ static int mthca_cmd_wait(struct mthca_dev *dev,
 	if (err)
 		goto out;
 
-	*status = context->status;
-	if (*status)
+	if (context->status) {
 		mthca_dbg(dev, "Command %02x completed with status %02x\n",
-			  op, *status);
+			  op, context->status);
+		err = mthca_status_to_errno(context->status);
+	}
 
 	if (out_is_imm)
 		*out_param = context->out_param;
@@ -432,17 +469,16 @@ static int mthca_cmd_box(struct mthca_dev *dev,
 			 u32 in_modifier,
 			 u8 op_modifier,
 			 u16 op,
-			 unsigned long timeout,
-			 u8 *status)
+			 unsigned long timeout)
 {
 	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
 		return mthca_cmd_wait(dev, in_param, &out_param, 0,
 				      in_modifier, op_modifier, op,
-				      timeout, status);
+				      timeout);
 	else
 		return mthca_cmd_poll(dev, in_param, &out_param, 0,
 				      in_modifier, op_modifier, op,
-				      timeout, status);
+				      timeout);
 }
 
 /* Invoke a command with no output parameter */
@@ -451,11 +487,10 @@ static int mthca_cmd(struct mthca_dev *dev,
 		     u32 in_modifier,
 		     u8 op_modifier,
 		     u16 op,
-		     unsigned long timeout,
-		     u8 *status)
+		     unsigned long timeout)
 {
 	return mthca_cmd_box(dev, in_param, 0, in_modifier,
-			     op_modifier, op, timeout, status);
+			     op_modifier, op, timeout);
 }
 
 /*
@@ -469,17 +504,16 @@ static int mthca_cmd_imm(struct mthca_dev *dev,
 			 u32 in_modifier,
 			 u8 op_modifier,
 			 u16 op,
-			 unsigned long timeout,
-			 u8 *status)
+			 unsigned long timeout)
 {
 	if (dev->cmd.flags & MTHCA_CMD_USE_EVENTS)
 		return mthca_cmd_wait(dev, in_param, out_param, 1,
 				      in_modifier, op_modifier, op,
-				      timeout, status);
+				      timeout);
 	else
 		return mthca_cmd_poll(dev, in_param, out_param, 1,
 				      in_modifier, op_modifier, op,
-				      timeout, status);
+				      timeout);
 }
 
 int mthca_cmd_init(struct mthca_dev *dev)
@@ -596,14 +630,14 @@ void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox)
596 kfree(mailbox); 630 kfree(mailbox);
597} 631}
598 632
599int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) 633int mthca_SYS_EN(struct mthca_dev *dev)
600{ 634{
601 u64 out; 635 u64 out;
602 int ret; 636 int ret;
603 637
604 ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D, status); 638 ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, CMD_TIME_CLASS_D);
605 639
606 if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR) 640 if (ret == -ENOMEM)
607 mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, " 641 mthca_warn(dev, "SYS_EN DDR error: syn=%x, sock=%d, "
608 "sladdr=%d, SPD source=%s\n", 642 "sladdr=%d, SPD source=%s\n",
609 (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, 643 (int) (out >> 6) & 0xf, (int) (out >> 4) & 3,
@@ -612,13 +646,13 @@ int mthca_SYS_EN(struct mthca_dev *dev, u8 *status)
612 return ret; 646 return ret;
613} 647}
614 648
615int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) 649int mthca_SYS_DIS(struct mthca_dev *dev)
616{ 650{
617 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); 651 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
618} 652}
619 653
620static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, 654static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
621 u64 virt, u8 *status) 655 u64 virt)
622{ 656{
623 struct mthca_mailbox *mailbox; 657 struct mthca_mailbox *mailbox;
624 struct mthca_icm_iter iter; 658 struct mthca_icm_iter iter;
@@ -666,8 +700,8 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
666 700
667 if (++nent == MTHCA_MAILBOX_SIZE / 16) { 701 if (++nent == MTHCA_MAILBOX_SIZE / 16) {
668 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 702 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
669 CMD_TIME_CLASS_B, status); 703 CMD_TIME_CLASS_B);
670 if (err || *status) 704 if (err)
671 goto out; 705 goto out;
672 nent = 0; 706 nent = 0;
673 } 707 }
@@ -676,7 +710,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
676 710
677 if (nent) 711 if (nent)
678 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, 712 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
679 CMD_TIME_CLASS_B, status); 713 CMD_TIME_CLASS_B);
680 714
681 switch (op) { 715 switch (op) {
682 case CMD_MAP_FA: 716 case CMD_MAP_FA:
@@ -696,19 +730,19 @@ out:
696 return err; 730 return err;
697} 731}
698 732
699int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) 733int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm)
700{ 734{
701 return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1, status); 735 return mthca_map_cmd(dev, CMD_MAP_FA, icm, -1);
702} 736}
703 737
704int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status) 738int mthca_UNMAP_FA(struct mthca_dev *dev)
705{ 739{
706 return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status); 740 return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B);
707} 741}
708 742
709int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) 743int mthca_RUN_FW(struct mthca_dev *dev)
710{ 744{
711 return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status); 745 return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A);
712} 746}
713 747
714static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base) 748static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
@@ -737,7 +771,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
737 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n"); 771 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
738} 772}
739 773
740int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) 774int mthca_QUERY_FW(struct mthca_dev *dev)
741{ 775{
742 struct mthca_mailbox *mailbox; 776 struct mthca_mailbox *mailbox;
743 u32 *outbox; 777 u32 *outbox;
@@ -771,7 +805,7 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
771 outbox = mailbox->buf; 805 outbox = mailbox->buf;
772 806
773 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, 807 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW,
774 CMD_TIME_CLASS_A, status); 808 CMD_TIME_CLASS_A);
775 809
776 if (err) 810 if (err)
777 goto out; 811 goto out;
@@ -843,7 +877,7 @@ out:
843 return err; 877 return err;
844} 878}
845 879
846int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) 880int mthca_ENABLE_LAM(struct mthca_dev *dev)
847{ 881{
848 struct mthca_mailbox *mailbox; 882 struct mthca_mailbox *mailbox;
849 u8 info; 883 u8 info;
@@ -864,14 +898,11 @@ int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status)
864 outbox = mailbox->buf; 898 outbox = mailbox->buf;
865 899
866 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, 900 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM,
867 CMD_TIME_CLASS_C, status); 901 CMD_TIME_CLASS_C);
868 902
869 if (err) 903 if (err)
870 goto out; 904 goto out;
871 905
872 if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE)
873 goto out;
874
875 MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); 906 MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET);
876 MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET); 907 MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET);
877 MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET); 908 MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET);
@@ -896,12 +927,12 @@ out:
896 return err; 927 return err;
897} 928}
898 929
899int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) 930int mthca_DISABLE_LAM(struct mthca_dev *dev)
900{ 931{
901 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); 932 return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C);
902} 933}
903 934
904int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) 935int mthca_QUERY_DDR(struct mthca_dev *dev)
905{ 936{
906 struct mthca_mailbox *mailbox; 937 struct mthca_mailbox *mailbox;
907 u8 info; 938 u8 info;
@@ -922,7 +953,7 @@ int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status)
922 outbox = mailbox->buf; 953 outbox = mailbox->buf;
923 954
924 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, 955 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR,
925 CMD_TIME_CLASS_A, status); 956 CMD_TIME_CLASS_A);
926 957
927 if (err) 958 if (err)
928 goto out; 959 goto out;
@@ -952,7 +983,7 @@ out:
952} 983}
953 984
954int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, 985int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
955 struct mthca_dev_lim *dev_lim, u8 *status) 986 struct mthca_dev_lim *dev_lim)
956{ 987{
957 struct mthca_mailbox *mailbox; 988 struct mthca_mailbox *mailbox;
958 u32 *outbox; 989 u32 *outbox;
@@ -1028,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
1028 outbox = mailbox->buf; 1059 outbox = mailbox->buf;
1029 1060
1030 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, 1061 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM,
1031 CMD_TIME_CLASS_A, status); 1062 CMD_TIME_CLASS_A);
1032 1063
1033 if (err) 1064 if (err)
1034 goto out; 1065 goto out;
@@ -1232,7 +1263,7 @@ static void get_board_id(void *vsd, char *board_id)
1232} 1263}
1233 1264
1234int mthca_QUERY_ADAPTER(struct mthca_dev *dev, 1265int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1235 struct mthca_adapter *adapter, u8 *status) 1266 struct mthca_adapter *adapter)
1236{ 1267{
1237 struct mthca_mailbox *mailbox; 1268 struct mthca_mailbox *mailbox;
1238 u32 *outbox; 1269 u32 *outbox;
@@ -1251,7 +1282,7 @@ int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
1251 outbox = mailbox->buf; 1282 outbox = mailbox->buf;
1252 1283
1253 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, 1284 err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER,
1254 CMD_TIME_CLASS_A, status); 1285 CMD_TIME_CLASS_A);
1255 1286
1256 if (err) 1287 if (err)
1257 goto out; 1288 goto out;
@@ -1275,8 +1306,7 @@ out:
1275} 1306}
1276 1307
1277int mthca_INIT_HCA(struct mthca_dev *dev, 1308int mthca_INIT_HCA(struct mthca_dev *dev,
1278 struct mthca_init_hca_param *param, 1309 struct mthca_init_hca_param *param)
1279 u8 *status)
1280{ 1310{
1281 struct mthca_mailbox *mailbox; 1311 struct mthca_mailbox *mailbox;
1282 __be32 *inbox; 1312 __be32 *inbox;
@@ -1393,7 +1423,8 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1393 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); 1423 MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
1394 } 1424 }
1395 1425
1396 err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, CMD_TIME_CLASS_D, status); 1426 err = mthca_cmd(dev, mailbox->dma, 0, 0,
1427 CMD_INIT_HCA, CMD_TIME_CLASS_D);
1397 1428
1398 mthca_free_mailbox(dev, mailbox); 1429 mthca_free_mailbox(dev, mailbox);
1399 return err; 1430 return err;
@@ -1401,7 +1432,7 @@ int mthca_INIT_HCA(struct mthca_dev *dev,
1401 1432
1402int mthca_INIT_IB(struct mthca_dev *dev, 1433int mthca_INIT_IB(struct mthca_dev *dev,
1403 struct mthca_init_ib_param *param, 1434 struct mthca_init_ib_param *param,
1404 int port, u8 *status) 1435 int port)
1405{ 1436{
1406 struct mthca_mailbox *mailbox; 1437 struct mthca_mailbox *mailbox;
1407 u32 *inbox; 1438 u32 *inbox;
@@ -1445,24 +1476,24 @@ int mthca_INIT_IB(struct mthca_dev *dev,
1445 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); 1476 MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET);
1446 1477
1447 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, 1478 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB,
1448 CMD_TIME_CLASS_A, status); 1479 CMD_TIME_CLASS_A);
1449 1480
1450 mthca_free_mailbox(dev, mailbox); 1481 mthca_free_mailbox(dev, mailbox);
1451 return err; 1482 return err;
1452} 1483}
1453 1484
1454int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) 1485int mthca_CLOSE_IB(struct mthca_dev *dev, int port)
1455{ 1486{
1456 return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A, status); 1487 return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, CMD_TIME_CLASS_A);
1457} 1488}
1458 1489
1459int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) 1490int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic)
1460{ 1491{
1461 return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C, status); 1492 return mthca_cmd(dev, 0, 0, panic, CMD_CLOSE_HCA, CMD_TIME_CLASS_C);
1462} 1493}
1463 1494
1464int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, 1495int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1465 int port, u8 *status) 1496 int port)
1466{ 1497{
1467 struct mthca_mailbox *mailbox; 1498 struct mthca_mailbox *mailbox;
1468 u32 *inbox; 1499 u32 *inbox;
@@ -1491,18 +1522,18 @@ int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
1491 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); 1522 MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET);
1492 1523
1493 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, 1524 err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB,
1494 CMD_TIME_CLASS_B, status); 1525 CMD_TIME_CLASS_B);
1495 1526
1496 mthca_free_mailbox(dev, mailbox); 1527 mthca_free_mailbox(dev, mailbox);
1497 return err; 1528 return err;
1498} 1529}
1499 1530
1500int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status) 1531int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt)
1501{ 1532{
1502 return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status); 1533 return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt);
1503} 1534}
1504 1535
1505int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) 1536int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt)
1506{ 1537{
1507 struct mthca_mailbox *mailbox; 1538 struct mthca_mailbox *mailbox;
1508 __be64 *inbox; 1539 __be64 *inbox;
@@ -1517,7 +1548,7 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
1517 inbox[1] = cpu_to_be64(dma_addr); 1548 inbox[1] = cpu_to_be64(dma_addr);
1518 1549
1519 err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, 1550 err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM,
1520 CMD_TIME_CLASS_B, status); 1551 CMD_TIME_CLASS_B);
1521 1552
1522 mthca_free_mailbox(dev, mailbox); 1553 mthca_free_mailbox(dev, mailbox);
1523 1554
@@ -1528,31 +1559,31 @@ int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status
1528 return err; 1559 return err;
1529} 1560}
1530 1561
1531int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status) 1562int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count)
1532{ 1563{
1533 mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n", 1564 mthca_dbg(dev, "Unmapping %d pages at %llx from ICM.\n",
1534 page_count, (unsigned long long) virt); 1565 page_count, (unsigned long long) virt);
1535 1566
1536 return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status); 1567 return mthca_cmd(dev, virt, page_count, 0,
1568 CMD_UNMAP_ICM, CMD_TIME_CLASS_B);
1537} 1569}
1538 1570
1539int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) 1571int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm)
1540{ 1572{
1541 return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1, status); 1573 return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, -1);
1542} 1574}
1543 1575
1544int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status) 1576int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev)
1545{ 1577{
1546 return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status); 1578 return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B);
1547} 1579}
1548 1580
1549int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, 1581int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages)
1550 u8 *status)
1551{ 1582{
1552 int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE, 1583 int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0,
1553 CMD_TIME_CLASS_A, status); 1584 0, CMD_SET_ICM_SIZE, CMD_TIME_CLASS_A);
1554 1585
1555 if (ret || status) 1586 if (ret)
1556 return ret; 1587 return ret;
1557 1588
1558 /* 1589 /*
@@ -1566,74 +1597,73 @@ int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
1566} 1597}
1567 1598
1568int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1599int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1569 int mpt_index, u8 *status) 1600 int mpt_index)
1570{ 1601{
1571 return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, 1602 return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT,
1572 CMD_TIME_CLASS_B, status); 1603 CMD_TIME_CLASS_B);
1573} 1604}
1574 1605
1575int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1606int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1576 int mpt_index, u8 *status) 1607 int mpt_index)
1577{ 1608{
1578 return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, 1609 return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
1579 !mailbox, CMD_HW2SW_MPT, 1610 !mailbox, CMD_HW2SW_MPT,
1580 CMD_TIME_CLASS_B, status); 1611 CMD_TIME_CLASS_B);
1581} 1612}
1582 1613
1583int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1614int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1584 int num_mtt, u8 *status) 1615 int num_mtt)
1585{ 1616{
1586 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, 1617 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
1587 CMD_TIME_CLASS_B, status); 1618 CMD_TIME_CLASS_B);
1588} 1619}
1589 1620
1590int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) 1621int mthca_SYNC_TPT(struct mthca_dev *dev)
1591{ 1622{
1592 return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status); 1623 return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B);
1593} 1624}
1594 1625
1595int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, 1626int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
1596 int eq_num, u8 *status) 1627 int eq_num)
1597{ 1628{
1598 mthca_dbg(dev, "%s mask %016llx for eqn %d\n", 1629 mthca_dbg(dev, "%s mask %016llx for eqn %d\n",
1599 unmap ? "Clearing" : "Setting", 1630 unmap ? "Clearing" : "Setting",
1600 (unsigned long long) event_mask, eq_num); 1631 (unsigned long long) event_mask, eq_num);
1601 return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, 1632 return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num,
1602 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); 1633 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
1603} 1634}
1604 1635
1605int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1636int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1606 int eq_num, u8 *status) 1637 int eq_num)
1607{ 1638{
1608 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, 1639 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
1609 CMD_TIME_CLASS_A, status); 1640 CMD_TIME_CLASS_A);
1610} 1641}
1611 1642
1612int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1643int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1613 int eq_num, u8 *status) 1644 int eq_num)
1614{ 1645{
1615 return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, 1646 return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0,
1616 CMD_HW2SW_EQ, 1647 CMD_HW2SW_EQ,
1617 CMD_TIME_CLASS_A, status); 1648 CMD_TIME_CLASS_A);
1618} 1649}
1619 1650
1620int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1651int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1621 int cq_num, u8 *status) 1652 int cq_num)
1622{ 1653{
1623 return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, 1654 return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ,
1624 CMD_TIME_CLASS_A, status); 1655 CMD_TIME_CLASS_A);
1625} 1656}
1626 1657
1627int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1658int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1628 int cq_num, u8 *status) 1659 int cq_num)
1629{ 1660{
1630 return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, 1661 return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0,
1631 CMD_HW2SW_CQ, 1662 CMD_HW2SW_CQ,
1632 CMD_TIME_CLASS_A, status); 1663 CMD_TIME_CLASS_A);
1633} 1664}
1634 1665
1635int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size, 1666int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size)
1636 u8 *status)
1637{ 1667{
1638 struct mthca_mailbox *mailbox; 1668 struct mthca_mailbox *mailbox;
1639 __be32 *inbox; 1669 __be32 *inbox;
@@ -1657,44 +1687,43 @@ int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
1657 MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET); 1687 MTHCA_PUT(inbox, lkey, RESIZE_CQ_LKEY_OFFSET);
1658 1688
1659 err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ, 1689 err = mthca_cmd(dev, mailbox->dma, cq_num, 1, CMD_RESIZE_CQ,
1660 CMD_TIME_CLASS_B, status); 1690 CMD_TIME_CLASS_B);
1661 1691
1662 mthca_free_mailbox(dev, mailbox); 1692 mthca_free_mailbox(dev, mailbox);
1663 return err; 1693 return err;
1664} 1694}
1665 1695
1666int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1696int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1667 int srq_num, u8 *status) 1697 int srq_num)
1668{ 1698{
1669 return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, 1699 return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ,
1670 CMD_TIME_CLASS_A, status); 1700 CMD_TIME_CLASS_A);
1671} 1701}
1672 1702
1673int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1703int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1674 int srq_num, u8 *status) 1704 int srq_num)
1675{ 1705{
1676 return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, 1706 return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
1677 CMD_HW2SW_SRQ, 1707 CMD_HW2SW_SRQ,
1678 CMD_TIME_CLASS_A, status); 1708 CMD_TIME_CLASS_A);
1679} 1709}
1680 1710
1681int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, 1711int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
1682 struct mthca_mailbox *mailbox, u8 *status) 1712 struct mthca_mailbox *mailbox)
1683{ 1713{
1684 return mthca_cmd_box(dev, 0, mailbox->dma, num, 0, 1714 return mthca_cmd_box(dev, 0, mailbox->dma, num, 0,
1685 CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status); 1715 CMD_QUERY_SRQ, CMD_TIME_CLASS_A);
1686} 1716}
1687 1717
1688int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) 1718int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit)
1689{ 1719{
1690 return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, 1720 return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ,
1691 CMD_TIME_CLASS_B, status); 1721 CMD_TIME_CLASS_B);
1692} 1722}
1693 1723
1694int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur, 1724int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
1695 enum ib_qp_state next, u32 num, int is_ee, 1725 enum ib_qp_state next, u32 num, int is_ee,
1696 struct mthca_mailbox *mailbox, u32 optmask, 1726 struct mthca_mailbox *mailbox, u32 optmask)
1697 u8 *status)
1698{ 1727{
1699 static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { 1728 static const u16 op[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1700 [IB_QPS_RESET] = { 1729 [IB_QPS_RESET] = {
@@ -1755,7 +1784,7 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
1755 1784
1756 err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, 1785 err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
1757 (!!is_ee << 24) | num, op_mod, 1786 (!!is_ee << 24) | num, op_mod,
1758 op[cur][next], CMD_TIME_CLASS_C, status); 1787 op[cur][next], CMD_TIME_CLASS_C);
1759 1788
1760 if (0 && mailbox) { 1789 if (0 && mailbox) {
1761 int i; 1790 int i;
@@ -1789,21 +1818,20 @@ int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
1789 } 1818 }
1790 1819
1791 err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num, 1820 err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num,
1792 op_mod, op[cur][next], CMD_TIME_CLASS_C, status); 1821 op_mod, op[cur][next], CMD_TIME_CLASS_C);
1793 } 1822 }
1794 1823
1795 return err; 1824 return err;
1796} 1825}
1797 1826
1798int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, 1827int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
1799 struct mthca_mailbox *mailbox, u8 *status) 1828 struct mthca_mailbox *mailbox)
1800{ 1829{
1801 return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, 1830 return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0,
1802 CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); 1831 CMD_QUERY_QPEE, CMD_TIME_CLASS_A);
1803} 1832}
1804 1833
1805int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, 1834int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
1806 u8 *status)
1807{ 1835{
1808 u8 op_mod; 1836 u8 op_mod;
1809 1837
@@ -1825,12 +1853,12 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
1825 } 1853 }
1826 1854
1827 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, 1855 return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP,
1828 CMD_TIME_CLASS_B, status); 1856 CMD_TIME_CLASS_B);
1829} 1857}
1830 1858
1831int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, 1859int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1832 int port, struct ib_wc *in_wc, struct ib_grh *in_grh, 1860 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
1833 void *in_mad, void *response_mad, u8 *status) 1861 void *in_mad, void *response_mad)
1834{ 1862{
1835 struct mthca_mailbox *inmailbox, *outmailbox; 1863 struct mthca_mailbox *inmailbox, *outmailbox;
1836 void *inbox; 1864 void *inbox;
@@ -1897,9 +1925,9 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1897 1925
1898 err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, 1926 err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma,
1899 in_modifier, op_modifier, 1927 in_modifier, op_modifier,
1900 CMD_MAD_IFC, CMD_TIME_CLASS_C, status); 1928 CMD_MAD_IFC, CMD_TIME_CLASS_C);
1901 1929
1902 if (!err && !*status) 1930 if (!err)
1903 memcpy(response_mad, outmailbox->buf, 256); 1931 memcpy(response_mad, outmailbox->buf, 256);
1904 1932
1905 mthca_free_mailbox(dev, inmailbox); 1933 mthca_free_mailbox(dev, inmailbox);
@@ -1908,33 +1936,33 @@ int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
1908} 1936}
1909 1937
1910int mthca_READ_MGM(struct mthca_dev *dev, int index, 1938int mthca_READ_MGM(struct mthca_dev *dev, int index,
1911 struct mthca_mailbox *mailbox, u8 *status) 1939 struct mthca_mailbox *mailbox)
1912{ 1940{
1913 return mthca_cmd_box(dev, 0, mailbox->dma, index, 0, 1941 return mthca_cmd_box(dev, 0, mailbox->dma, index, 0,
1914 CMD_READ_MGM, CMD_TIME_CLASS_A, status); 1942 CMD_READ_MGM, CMD_TIME_CLASS_A);
1915} 1943}
1916 1944
1917int mthca_WRITE_MGM(struct mthca_dev *dev, int index, 1945int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
1918 struct mthca_mailbox *mailbox, u8 *status) 1946 struct mthca_mailbox *mailbox)
1919{ 1947{
1920 return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, 1948 return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM,
1921 CMD_TIME_CLASS_A, status); 1949 CMD_TIME_CLASS_A);
1922} 1950}
1923 1951
1924int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, 1952int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
1925 u16 *hash, u8 *status) 1953 u16 *hash)
1926{ 1954{
1927 u64 imm; 1955 u64 imm;
1928 int err; 1956 int err;
1929 1957
1930 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, 1958 err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH,
1931 CMD_TIME_CLASS_A, status); 1959 CMD_TIME_CLASS_A);
1932 1960
1933 *hash = imm; 1961 *hash = imm;
1934 return err; 1962 return err;
1935} 1963}
1936 1964
1937int mthca_NOP(struct mthca_dev *dev, u8 *status) 1965int mthca_NOP(struct mthca_dev *dev)
1938{ 1966{
1939 return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100), status); 1967 return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, msecs_to_jiffies(100));
1940} 1968}
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.h b/drivers/infiniband/hw/mthca/mthca_cmd.h
index 6efd3265f248..f952244c54de 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.h
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.h
@@ -252,79 +252,74 @@ struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev,
 					  gfp_t gfp_mask);
 void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox);
 
-int mthca_SYS_EN(struct mthca_dev *dev, u8 *status);
-int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status);
-int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status);
-int mthca_RUN_FW(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status);
-int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status);
-int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status);
+int mthca_SYS_EN(struct mthca_dev *dev);
+int mthca_SYS_DIS(struct mthca_dev *dev);
+int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_FA(struct mthca_dev *dev);
+int mthca_RUN_FW(struct mthca_dev *dev);
+int mthca_QUERY_FW(struct mthca_dev *dev);
+int mthca_ENABLE_LAM(struct mthca_dev *dev);
+int mthca_DISABLE_LAM(struct mthca_dev *dev);
+int mthca_QUERY_DDR(struct mthca_dev *dev);
 int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
-			struct mthca_dev_lim *dev_lim, u8 *status);
+			struct mthca_dev_lim *dev_lim);
 int mthca_QUERY_ADAPTER(struct mthca_dev *dev,
-			struct mthca_adapter *adapter, u8 *status);
+			struct mthca_adapter *adapter);
 int mthca_INIT_HCA(struct mthca_dev *dev,
-		   struct mthca_init_hca_param *param,
-		   u8 *status);
+		   struct mthca_init_hca_param *param);
 int mthca_INIT_IB(struct mthca_dev *dev,
 		  struct mthca_init_ib_param *param,
-		  int port, u8 *status);
-int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status);
-int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status);
+		  int port);
+int mthca_CLOSE_IB(struct mthca_dev *dev, int port);
+int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic);
 int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param,
-		 int port, u8 *status);
-int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status);
-int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status);
-int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status);
-int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status);
-int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status);
-int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages,
-		       u8 *status);
+		 int port);
+int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt);
+int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt);
+int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count);
+int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm);
+int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev);
+int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages);
 int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int mpt_index, u8 *status);
+		    int mpt_index);
 int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int mpt_index, u8 *status);
+		    int mpt_index);
 int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int num_mtt, u8 *status);
-int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status);
+		    int num_mtt);
+int mthca_SYNC_TPT(struct mthca_dev *dev);
 int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
-		 int eq_num, u8 *status);
+		 int eq_num);
 int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int eq_num, u8 *status);
+		   int eq_num);
 int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int eq_num, u8 *status);
+		   int eq_num);
 int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int cq_num, u8 *status);
+		   int cq_num);
 int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		   int cq_num, u8 *status);
-int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size,
-		    u8 *status);
+		   int cq_num);
+int mthca_RESIZE_CQ(struct mthca_dev *dev, int cq_num, u32 lkey, u8 log_size);
 int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int srq_num, u8 *status);
+		    int srq_num);
 int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    int srq_num, u8 *status);
+		    int srq_num);
 int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num,
-		    struct mthca_mailbox *mailbox, u8 *status);
-int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status);
+		    struct mthca_mailbox *mailbox);
+int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit);
 int mthca_MODIFY_QP(struct mthca_dev *dev, enum ib_qp_state cur,
 		    enum ib_qp_state next, u32 num, int is_ee,
-		    struct mthca_mailbox *mailbox, u32 optmask,
-		    u8 *status);
+		    struct mthca_mailbox *mailbox, u32 optmask);
 int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee,
-		   struct mthca_mailbox *mailbox, u8 *status);
-int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn,
-			  u8 *status);
+		   struct mthca_mailbox *mailbox);
+int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn);
 int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
 		  int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
-		  void *in_mad, void *response_mad, u8 *status);
+		  void *in_mad, void *response_mad);
 int mthca_READ_MGM(struct mthca_dev *dev, int index,
-		   struct mthca_mailbox *mailbox, u8 *status);
+		   struct mthca_mailbox *mailbox);
 int mthca_WRITE_MGM(struct mthca_dev *dev, int index,
-		    struct mthca_mailbox *mailbox, u8 *status);
+		    struct mthca_mailbox *mailbox);
 int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
-		    u16 *hash, u8 *status);
-int mthca_NOP(struct mthca_dev *dev, u8 *status);
+		    u16 *hash);
+int mthca_NOP(struct mthca_dev *dev);
 
 #endif /* MTHCA_CMD_H */
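
Taken together, the prototype changes above replace the old two-result convention (int return plus an out-parameter status byte) with a single errno. A schematic before/after for one such caller, using mthca_SYS_EN() as the example (a condensed illustration, not a verbatim excerpt from the patch):

	/* Before: success had to be confirmed twice per invocation. */
	u8 status;
	err = mthca_SYS_EN(mdev, &status);
	if (err)		/* transport failure (timeout, no interrupt, ...) */
		return err;
	if (status)		/* firmware rejected the command */
		return -EINVAL;

	/* After: one errno carries both failure classes. */
	err = mthca_SYS_EN(mdev);
	if (err)
		return err;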
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 18ee3fa4b88c..53157b86a1ba 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -779,7 +779,6 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	struct mthca_mailbox *mailbox;
 	struct mthca_cq_context *cq_context;
 	int err = -ENOMEM;
-	u8 status;
 
 	cq->ibcq.cqe = nent - 1;
 	cq->is_kernel = !ctx;
@@ -847,19 +846,12 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 		cq_context->state_db = cpu_to_be32(cq->arm_db_index);
 	}
 
-	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status);
+	err = mthca_SW2HW_CQ(dev, mailbox, cq->cqn);
 	if (err) {
 		mthca_warn(dev, "SW2HW_CQ failed (%d)\n", err);
 		goto err_out_free_mr;
 	}
 
-	if (status) {
-		mthca_warn(dev, "SW2HW_CQ returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
-		goto err_out_free_mr;
-	}
-
 	spin_lock_irq(&dev->cq_table.lock);
 	if (mthca_array_set(&dev->cq_table.cq,
 			    cq->cqn & (dev->limits.num_cqs - 1),
@@ -915,7 +907,6 @@ void mthca_free_cq(struct mthca_dev *dev,
 {
 	struct mthca_mailbox *mailbox;
 	int err;
-	u8 status;
 
 	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
 	if (IS_ERR(mailbox)) {
@@ -923,11 +914,9 @@ void mthca_free_cq(struct mthca_dev *dev,
 		return;
 	}
 
-	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status);
+	err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn);
 	if (err)
 		mthca_warn(dev, "HW2SW_CQ failed (%d)\n", err);
-	else if (status)
-		mthca_warn(dev, "HW2SW_CQ returned status 0x%02x\n", status);
 
 	if (0) {
 		__be32 *ctx = mailbox->buf;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 76785c653c13..7c9d35f39d75 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -474,7 +474,6 @@ static int mthca_create_eq(struct mthca_dev *dev,
 	struct mthca_eq_context *eq_context;
 	int err = -ENOMEM;
 	int i;
-	u8 status;
 
 	eq->dev = dev;
 	eq->nent = roundup_pow_of_two(max(nent, 2));
@@ -543,15 +542,9 @@ static int mthca_create_eq(struct mthca_dev *dev,
 	eq_context->intr = intr;
 	eq_context->lkey = cpu_to_be32(eq->mr.ibmr.lkey);
 
-	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status);
+	err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn);
 	if (err) {
-		mthca_warn(dev, "SW2HW_EQ failed (%d)\n", err);
-		goto err_out_free_mr;
-	}
-	if (status) {
-		mthca_warn(dev, "SW2HW_EQ returned status 0x%02x\n",
-			   status);
-		err = -EINVAL;
+		mthca_warn(dev, "SW2HW_EQ returned %d\n", err);
 		goto err_out_free_mr;
 	}
 
@@ -597,7 +590,6 @@ static void mthca_free_eq(struct mthca_dev *dev,
 {
 	struct mthca_mailbox *mailbox;
 	int err;
-	u8 status;
 	int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /
 		PAGE_SIZE;
 	int i;
@@ -606,11 +598,9 @@ static void mthca_free_eq(struct mthca_dev *dev,
 	if (IS_ERR(mailbox))
 		return;
 
-	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, &status);
+	err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn);
 	if (err)
-		mthca_warn(dev, "HW2SW_EQ failed (%d)\n", err);
-	if (status)
-		mthca_warn(dev, "HW2SW_EQ returned status 0x%02x\n", status);
+		mthca_warn(dev, "HW2SW_EQ returned %d\n", err);
 
 	dev->eq_table.arm_mask &= ~eq->eqn_mask;
 
@@ -738,7 +728,6 @@ static void mthca_unmap_eq_regs(struct mthca_dev *dev)
 int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 {
 	int ret;
-	u8 status;
 
 	/*
 	 * We assume that mapping one page is enough for the whole EQ
@@ -757,9 +746,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 		return -ENOMEM;
 	}
 
-	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt, &status);
-	if (!ret && status)
-		ret = -EINVAL;
+	ret = mthca_MAP_ICM_page(dev, dev->eq_table.icm_dma, icm_virt);
 	if (ret) {
 		pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
 			       PCI_DMA_BIDIRECTIONAL);
@@ -771,9 +758,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
 
 void mthca_unmap_eq_icm(struct mthca_dev *dev)
 {
-	u8 status;
-
-	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1, &status);
+	mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, 1);
 	pci_unmap_page(dev->pdev, dev->eq_table.icm_dma, PAGE_SIZE,
 		       PCI_DMA_BIDIRECTIONAL);
 	__free_page(dev->eq_table.icm_page);
@@ -782,7 +767,6 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev)
 int mthca_init_eq_table(struct mthca_dev *dev)
 {
 	int err;
-	u8 status;
 	u8 intr;
 	int i;
 
@@ -864,22 +848,16 @@ int mthca_init_eq_table(struct mthca_dev *dev)
 	}
 
 	err = mthca_MAP_EQ(dev, async_mask(dev),
-			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+			   0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
 	if (err)
 		mthca_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
 			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err);
-	if (status)
-		mthca_warn(dev, "MAP_EQ for async EQ %d returned status 0x%02x\n",
-			   dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status);
 
 	err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+			   0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
 	if (err)
 		mthca_warn(dev, "MAP_EQ for cmd EQ %d failed (%d)\n",
 			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err);
-	if (status)
-		mthca_warn(dev, "MAP_EQ for cmd EQ %d returned status 0x%02x\n",
-			   dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
 
 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
 		if (mthca_is_memfree(dev))
@@ -909,15 +887,14 @@ err_out_free:
 
 void mthca_cleanup_eq_table(struct mthca_dev *dev)
 {
-	u8 status;
 	int i;
 
 	mthca_free_irqs(dev);
 
 	mthca_MAP_EQ(dev, async_mask(dev),
-		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status);
+		     1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
 	mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK,
-		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status);
+		     1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn);
 
 	for (i = 0; i < MTHCA_NUM_EQ; ++i)
 		mthca_free_eq(dev, &dev->eq_table.eq[i]);
diff --git a/drivers/infiniband/hw/mthca/mthca_mad.c b/drivers/infiniband/hw/mthca/mthca_mad.c
index 03a59534f59e..b6f7f457fc55 100644
--- a/drivers/infiniband/hw/mthca/mthca_mad.c
+++ b/drivers/infiniband/hw/mthca/mthca_mad.c
@@ -201,7 +201,6 @@ int mthca_process_mad(struct ib_device *ibdev,
 		      struct ib_mad *out_mad)
 {
 	int err;
-	u8 status;
 	u16 slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);
 	u16 prev_lid = 0;
 	struct ib_port_attr pattr;
@@ -252,17 +251,11 @@ int mthca_process_mad(struct ib_device *ibdev,
 	err = mthca_MAD_IFC(to_mdev(ibdev),
 			    mad_flags & IB_MAD_IGNORE_MKEY,
 			    mad_flags & IB_MAD_IGNORE_BKEY,
-			    port_num, in_wc, in_grh, in_mad, out_mad,
-			    &status);
-	if (err) {
-		mthca_err(to_mdev(ibdev), "MAD_IFC failed\n");
-		return IB_MAD_RESULT_FAILURE;
-	}
-	if (status == MTHCA_CMD_STAT_BAD_PKT)
+			    port_num, in_wc, in_grh, in_mad, out_mad);
+	if (err == -EBADMSG)
 		return IB_MAD_RESULT_SUCCESS;
-	if (status) {
-		mthca_err(to_mdev(ibdev), "MAD_IFC returned status %02x\n",
-			  status);
+	else if (err) {
+		mthca_err(to_mdev(ibdev), "MAD_IFC returned %d\n", err);
 		return IB_MAD_RESULT_FAILURE;
 	}
 
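
A subtlety in the mthca_process_mad() hunk above: with both failure classes now sharing one return value, the -EBADMSG case (formerly MTHCA_CMD_STAT_BAD_PKT) must be tested before the generic error check, because a bad MAD packet is consumed rather than treated as fatal. Condensed form of the resulting logic (schematic, not a verbatim excerpt):

	err = mthca_MAD_IFC(to_mdev(ibdev),
			    mad_flags & IB_MAD_IGNORE_MKEY,
			    mad_flags & IB_MAD_IGNORE_BKEY,
			    port_num, in_wc, in_grh, in_mad, out_mad);
	if (err == -EBADMSG)
		return IB_MAD_RESULT_SUCCESS;	/* bad packet: dropped, not an error */
	if (err)
		return IB_MAD_RESULT_FAILURE;	/* genuine command failure */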
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index f24b79b805f2..aa12a533ae9e 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -149,7 +149,7 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
 	} else if (!(mdev->mthca_flags & MTHCA_FLAG_PCIE))
 		mthca_info(mdev, "No PCI-X capability, not setting RBC.\n");
 
-	if (pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP)) {
+	if (pci_is_pcie(mdev->pdev)) {
 		if (pcie_set_readrq(mdev->pdev, 4096)) {
 			mthca_err(mdev, "Couldn't write PCI Express read request, "
 				"aborting.\n");
@@ -165,19 +165,14 @@ static int mthca_tune_pci(struct mthca_dev *mdev)
 static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 {
 	int err;
-	u8 status;
 
 	mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
-	err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+	err = mthca_QUERY_DEV_LIM(mdev, dev_lim);
 	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM command returned %d"
+				", aborting.\n", err);
 		return err;
 	}
-	if (status) {
-		mthca_err(mdev, "QUERY_DEV_LIM returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}
 	if (dev_lim->min_page_sz > PAGE_SIZE) {
 		mthca_err(mdev, "HCA minimum page size of %d bigger than "
 			  "kernel PAGE_SIZE of %ld, aborting.\n",
@@ -293,49 +288,32 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
 static int mthca_init_tavor(struct mthca_dev *mdev)
 {
 	s64 size;
-	u8 status;
 	int err;
 	struct mthca_dev_lim dev_lim;
 	struct mthca_profile profile;
 	struct mthca_init_hca_param init_hca;
 
-	err = mthca_SYS_EN(mdev, &status);
+	err = mthca_SYS_EN(mdev);
 	if (err) {
-		mthca_err(mdev, "SYS_EN command failed, aborting.\n");
+		mthca_err(mdev, "SYS_EN command returned %d, aborting.\n", err);
 		return err;
 	}
-	if (status) {
-		mthca_err(mdev, "SYS_EN returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}
 
-	err = mthca_QUERY_FW(mdev, &status);
+	err = mthca_QUERY_FW(mdev);
 	if (err) {
-		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_FW command returned %d,"
+			  " aborting.\n", err);
 		goto err_disable;
 	}
-	err = mthca_QUERY_DDR(mdev, &status);
+	err = mthca_QUERY_DDR(mdev);
 	if (err) {
-		mthca_err(mdev, "QUERY_DDR command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_DDR returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_DDR command returned %d, aborting.\n", err);
 		goto err_disable;
 	}
 
 	err = mthca_dev_lim(mdev, &dev_lim);
 	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM command returned %d, aborting.\n", err);
 		goto err_disable;
 	}
 
@@ -351,29 +329,22 @@ static int mthca_init_tavor(struct mthca_dev *mdev)
 		goto err_disable;
 	}
 
-	err = mthca_INIT_HCA(mdev, &init_hca, &status);
+	err = mthca_INIT_HCA(mdev, &init_hca);
 	if (err) {
-		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
-		goto err_disable;
-	}
-	if (status) {
-		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
 		goto err_disable;
 	}
 
 	return 0;
 
 err_disable:
-	mthca_SYS_DIS(mdev, &status);
+	mthca_SYS_DIS(mdev);
 
 	return err;
 }
 
 static int mthca_load_fw(struct mthca_dev *mdev)
 {
-	u8 status;
 	int err;
 
 	/* FIXME: use HCA-attached memory for FW if present */
@@ -386,31 +357,21 @@ static int mthca_load_fw(struct mthca_dev *mdev)
 		return -ENOMEM;
 	}
 
-	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status);
+	err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm);
 	if (err) {
-		mthca_err(mdev, "MAP_FA command failed, aborting.\n");
-		goto err_free;
-	}
-	if (status) {
-		mthca_err(mdev, "MAP_FA returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "MAP_FA command returned %d, aborting.\n", err);
 		goto err_free;
 	}
-	err = mthca_RUN_FW(mdev, &status);
+	err = mthca_RUN_FW(mdev);
 	if (err) {
-		mthca_err(mdev, "RUN_FW command failed, aborting.\n");
-		goto err_unmap_fa;
-	}
-	if (status) {
-		mthca_err(mdev, "RUN_FW returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "RUN_FW command returned %d, aborting.\n", err);
 		goto err_unmap_fa;
 	}
 
 	return 0;
 
 err_unmap_fa:
-	mthca_UNMAP_FA(mdev, &status);
+	mthca_UNMAP_FA(mdev);
 
 err_free:
 	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
@@ -423,19 +384,13 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 			  u64 icm_size)
 {
 	u64 aux_pages;
-	u8 status;
 	int err;
 
-	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status);
+	err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages);
 	if (err) {
-		mthca_err(mdev, "SET_ICM_SIZE command failed, aborting.\n");
+		mthca_err(mdev, "SET_ICM_SIZE command returned %d, aborting.\n", err);
 		return err;
 	}
-	if (status) {
-		mthca_err(mdev, "SET_ICM_SIZE returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}
 
 	mthca_dbg(mdev, "%lld KB of HCA context requires %lld KB aux memory.\n",
 		  (unsigned long long) icm_size >> 10,
@@ -448,14 +403,9 @@ static int mthca_init_icm(struct mthca_dev *mdev,
 		return -ENOMEM;
 	}
 
-	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status);
+	err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm);
 	if (err) {
-		mthca_err(mdev, "MAP_ICM_AUX command failed, aborting.\n");
-		goto err_free_aux;
-	}
-	if (status) {
-		mthca_err(mdev, "MAP_ICM_AUX returned status 0x%02x, aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "MAP_ICM_AUX returned %d, aborting.\n", err);
 		goto err_free_aux;
 	}
 
@@ -596,7 +546,7 @@ err_unmap_eq:
 	mthca_unmap_eq_icm(mdev);
 
 err_unmap_aux:
-	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_UNMAP_ICM_AUX(mdev);
 
 err_free_aux:
 	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
@@ -606,7 +556,6 @@ err_free_aux:
 
 static void mthca_free_icms(struct mthca_dev *mdev)
 {
-	u8 status;
 
 	mthca_free_icm_table(mdev, mdev->mcg_table.table);
 	if (mdev->mthca_flags & MTHCA_FLAG_SRQ)
@@ -619,7 +568,7 @@ static void mthca_free_icms(struct mthca_dev *mdev)
 	mthca_free_icm_table(mdev, mdev->mr_table.mtt_table);
 	mthca_unmap_eq_icm(mdev);
 
-	mthca_UNMAP_ICM_AUX(mdev, &status);
+	mthca_UNMAP_ICM_AUX(mdev);
 	mthca_free_icm(mdev, mdev->fw.arbel.aux_icm, 0);
 }
 
@@ -629,43 +578,32 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
 	struct mthca_profile profile;
 	struct mthca_init_hca_param init_hca;
 	s64 icm_size;
-	u8 status;
 	int err;
 
-	err = mthca_QUERY_FW(mdev, &status);
+	err = mthca_QUERY_FW(mdev);
 	if (err) {
-		mthca_err(mdev, "QUERY_FW command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_FW command failed %d, aborting.\n", err);
 		return err;
 	}
-	if (status) {
-		mthca_err(mdev, "QUERY_FW returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
-	}
 
-	err = mthca_ENABLE_LAM(mdev, &status);
-	if (err) {
-		mthca_err(mdev, "ENABLE_LAM command failed, aborting.\n");
-		return err;
-	}
-	if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) {
+	err = mthca_ENABLE_LAM(mdev);
+	if (err == -EAGAIN) {
 		mthca_dbg(mdev, "No HCA-attached memory (running in MemFree mode)\n");
 		mdev->mthca_flags |= MTHCA_FLAG_NO_LAM;
-	} else if (status) {
-		mthca_err(mdev, "ENABLE_LAM returned status 0x%02x, "
-			  "aborting.\n", status);
-		return -EINVAL;
+	} else if (err) {
+		mthca_err(mdev, "ENABLE_LAM returned %d, aborting.\n", err);
+		return err;
 	}
 
 	err = mthca_load_fw(mdev);
 	if (err) {
-		mthca_err(mdev, "Failed to start FW, aborting.\n");
+		mthca_err(mdev, "Loading FW returned %d, aborting.\n", err);
 		goto err_disable;
 	}
 
 	err = mthca_dev_lim(mdev, &dev_lim);
 	if (err) {
-		mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
+		mthca_err(mdev, "QUERY_DEV_LIM returned %d, aborting.\n", err);
 		goto err_stop_fw;
 	}
 
@@ -685,15 +623,9 @@ static int mthca_init_arbel(struct mthca_dev *mdev)
 	if (err)
 		goto err_stop_fw;
 
-	err = mthca_INIT_HCA(mdev, &init_hca, &status);
+	err = mthca_INIT_HCA(mdev, &init_hca);
 	if (err) {
-		mthca_err(mdev, "INIT_HCA command failed, aborting.\n");
-		goto err_free_icm;
-	}
-	if (status) {
-		mthca_err(mdev, "INIT_HCA returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "INIT_HCA command returned %d, aborting.\n", err);
 		goto err_free_icm;
 	}
 
@@ -703,37 +635,34 @@ err_free_icm:
 	mthca_free_icms(mdev);
 
 err_stop_fw:
-	mthca_UNMAP_FA(mdev, &status);
+	mthca_UNMAP_FA(mdev);
 	mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 err_disable:
 	if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
-		mthca_DISABLE_LAM(mdev, &status);
+		mthca_DISABLE_LAM(mdev);
 
 	return err;
 }
 
 static void mthca_close_hca(struct mthca_dev *mdev)
 {
-	u8 status;
-
-	mthca_CLOSE_HCA(mdev, 0, &status);
+	mthca_CLOSE_HCA(mdev, 0);
 
 	if (mthca_is_memfree(mdev)) {
 		mthca_free_icms(mdev);
 
-		mthca_UNMAP_FA(mdev, &status);
+		mthca_UNMAP_FA(mdev);
 		mthca_free_icm(mdev, mdev->fw.arbel.fw_icm, 0);
 
 		if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM))
-			mthca_DISABLE_LAM(mdev, &status);
+			mthca_DISABLE_LAM(mdev);
 	} else
-		mthca_SYS_DIS(mdev, &status);
+		mthca_SYS_DIS(mdev);
 }
 
 static int mthca_init_hca(struct mthca_dev *mdev)
 {
-	u8 status;
 	int err;
 	struct mthca_adapter adapter;
 
@@ -745,15 +674,9 @@ static int mthca_init_hca(struct mthca_dev *mdev)
 	if (err)
 		return err;
 
-	err = mthca_QUERY_ADAPTER(mdev, &adapter, &status);
+	err = mthca_QUERY_ADAPTER(mdev, &adapter);
 	if (err) {
-		mthca_err(mdev, "QUERY_ADAPTER command failed, aborting.\n");
-		goto err_close;
-	}
-	if (status) {
-		mthca_err(mdev, "QUERY_ADAPTER returned status 0x%02x, "
-			  "aborting.\n", status);
-		err = -EINVAL;
+		mthca_err(mdev, "QUERY_ADAPTER command returned %d, aborting.\n", err);
 		goto err_close;
 	}
 
@@ -772,7 +695,6 @@ err_close:
 static int mthca_setup_hca(struct mthca_dev *dev)
 {
 	int err;
-	u8 status;
 
 	MTHCA_INIT_DOORBELL_LOCK(&dev->doorbell_lock);
 
@@ -833,8 +755,8 @@ static int mthca_setup_hca(struct mthca_dev *dev)
 		goto err_eq_table_free;
 	}
 
-	err = mthca_NOP(dev, &status);
-	if (err || status) {
+	err = mthca_NOP(dev);
+	if (err) {
 		if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
 			mthca_warn(dev, "NOP command failed to generate interrupt "
 				   "(IRQ %d).\n",
@@ -1166,7 +1088,6 @@ err_disable_pdev:
 static void __mthca_remove_one(struct pci_dev *pdev)
 {
 	struct mthca_dev *mdev = pci_get_drvdata(pdev);
-	u8 status;
 	int p;
 
 	if (mdev) {
@@ -1174,7 +1095,7 @@ static void __mthca_remove_one(struct pci_dev *pdev)
 		mthca_unregister_device(mdev);
 
 		for (p = 1; p <= mdev->limits.num_ports; ++p)
-			mthca_CLOSE_IB(mdev, p, &status);
+			mthca_CLOSE_IB(mdev, p);
1178 1099
1179 mthca_cleanup_mcg_table(mdev); 1100 mthca_cleanup_mcg_table(mdev);
1180 mthca_cleanup_av_table(mdev); 1101 mthca_cleanup_av_table(mdev);
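
The hunks above all apply one conversion: mthca command wrappers such as mthca_QUERY_FW() and mthca_ENABLE_LAM() used to report firmware failures through a u8 *status out-parameter that every caller had to test separately, and they now fold that status into the returned errno (with the "LAM not present" case mapped to -EAGAIN so mthca_init_arbel() can fall back to MemFree mode). A minimal user-space sketch of the translation, using made-up status values rather than the real MTHCA_CMD_STAT_* codes:

#include <errno.h>
#include <stdio.h>

/* hypothetical status codes, for illustration only */
#define CMD_STAT_OK          0x00
#define CMD_STAT_LAM_NOT_PRE 0x02

/* translate the firmware status byte into a negative errno once,
 * inside the wrapper, instead of at every call site */
static int status_to_errno(unsigned char status)
{
	switch (status) {
	case CMD_STAT_OK:
		return 0;
	case CMD_STAT_LAM_NOT_PRE:
		return -EAGAIN;	/* "no attached memory" is recoverable */
	default:
		return -EINVAL;
	}
}

int main(void)
{
	int err = status_to_errno(CMD_STAT_LAM_NOT_PRE);

	if (err == -EAGAIN)
		printf("no HCA-attached memory, using MemFree mode\n");
	else if (err)
		printf("ENABLE_LAM failed: %d\n", err);
	return 0;
}

With this shape, callers need exactly one error check, which is what lets every "if (status)" block in the hunks above disappear.
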
diff --git a/drivers/infiniband/hw/mthca/mthca_mcg.c b/drivers/infiniband/hw/mthca/mthca_mcg.c
index 515790a606e6..6304ae8f4a6c 100644
--- a/drivers/infiniband/hw/mthca/mthca_mcg.c
+++ b/drivers/infiniband/hw/mthca/mthca_mcg.c
@@ -68,7 +68,6 @@ static int find_mgm(struct mthca_dev *dev,
68 struct mthca_mgm *mgm = mgm_mailbox->buf; 68 struct mthca_mgm *mgm = mgm_mailbox->buf;
69 u8 *mgid; 69 u8 *mgid;
70 int err; 70 int err;
71 u8 status;
72 71
73 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 72 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
74 if (IS_ERR(mailbox)) 73 if (IS_ERR(mailbox))
@@ -77,12 +76,9 @@ static int find_mgm(struct mthca_dev *dev,
77 76
78 memcpy(mgid, gid, 16); 77 memcpy(mgid, gid, 16);
79 78
80 err = mthca_MGID_HASH(dev, mailbox, hash, &status); 79 err = mthca_MGID_HASH(dev, mailbox, hash);
81 if (err) 80 if (err) {
82 goto out; 81 mthca_err(dev, "MGID_HASH failed (%d)\n", err);
83 if (status) {
84 mthca_err(dev, "MGID_HASH returned status %02x\n", status);
85 err = -EINVAL;
86 goto out; 82 goto out;
87 } 83 }
88 84
@@ -93,12 +89,9 @@ static int find_mgm(struct mthca_dev *dev,
93 *prev = -1; 89 *prev = -1;
94 90
95 do { 91 do {
96 err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); 92 err = mthca_READ_MGM(dev, *index, mgm_mailbox);
97 if (err) 93 if (err) {
98 goto out; 94 mthca_err(dev, "READ_MGM failed (%d)\n", err);
99 if (status) {
100 mthca_err(dev, "READ_MGM returned status %02x\n", status);
101 err = -EINVAL;
102 goto out; 95 goto out;
103 } 96 }
104 97
@@ -134,7 +127,6 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
134 int link = 0; 127 int link = 0;
135 int i; 128 int i;
136 int err; 129 int err;
137 u8 status;
138 130
139 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 131 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
140 if (IS_ERR(mailbox)) 132 if (IS_ERR(mailbox))
@@ -160,12 +152,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
160 goto out; 152 goto out;
161 } 153 }
162 154
163 err = mthca_READ_MGM(dev, index, mailbox, &status); 155 err = mthca_READ_MGM(dev, index, mailbox);
164 if (err) 156 if (err) {
165 goto out; 157 mthca_err(dev, "READ_MGM failed (%d)\n", err);
166 if (status) {
167 mthca_err(dev, "READ_MGM returned status %02x\n", status);
168 err = -EINVAL;
169 goto out; 158 goto out;
170 } 159 }
171 memset(mgm, 0, sizeof *mgm); 160 memset(mgm, 0, sizeof *mgm);
@@ -189,11 +178,9 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
189 goto out; 178 goto out;
190 } 179 }
191 180
192 err = mthca_WRITE_MGM(dev, index, mailbox, &status); 181 err = mthca_WRITE_MGM(dev, index, mailbox);
193 if (err) 182 if (err) {
194 goto out; 183 mthca_err(dev, "WRITE_MGM failed %d\n", err);
195 if (status) {
196 mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
197 err = -EINVAL; 184 err = -EINVAL;
198 goto out; 185 goto out;
199 } 186 }
@@ -201,24 +188,17 @@ int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
201 if (!link) 188 if (!link)
202 goto out; 189 goto out;
203 190
204 err = mthca_READ_MGM(dev, prev, mailbox, &status); 191 err = mthca_READ_MGM(dev, prev, mailbox);
205 if (err) 192 if (err) {
206 goto out; 193 mthca_err(dev, "READ_MGM failed %d\n", err);
207 if (status) {
208 mthca_err(dev, "READ_MGM returned status %02x\n", status);
209 err = -EINVAL;
210 goto out; 194 goto out;
211 } 195 }
212 196
213 mgm->next_gid_index = cpu_to_be32(index << 6); 197 mgm->next_gid_index = cpu_to_be32(index << 6);
214 198
215 err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 199 err = mthca_WRITE_MGM(dev, prev, mailbox);
216 if (err) 200 if (err)
217 goto out; 201 mthca_err(dev, "WRITE_MGM returned %d\n", err);
218 if (status) {
219 mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
220 err = -EINVAL;
221 }
222 202
223 out: 203 out:
224 if (err && link && index != -1) { 204 if (err && link && index != -1) {
@@ -240,7 +220,6 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
240 int prev, index; 220 int prev, index;
241 int i, loc; 221 int i, loc;
242 int err; 222 int err;
243 u8 status;
244 223
245 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 224 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
246 if (IS_ERR(mailbox)) 225 if (IS_ERR(mailbox))
@@ -275,12 +254,9 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
275 mgm->qp[loc] = mgm->qp[i - 1]; 254 mgm->qp[loc] = mgm->qp[i - 1];
276 mgm->qp[i - 1] = 0; 255 mgm->qp[i - 1] = 0;
277 256
278 err = mthca_WRITE_MGM(dev, index, mailbox, &status); 257 err = mthca_WRITE_MGM(dev, index, mailbox);
279 if (err) 258 if (err) {
280 goto out; 259 mthca_err(dev, "WRITE_MGM returned %d\n", err);
281 if (status) {
282 mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
283 err = -EINVAL;
284 goto out; 260 goto out;
285 } 261 }
286 262
@@ -292,24 +268,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
292 int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6; 268 int amgm_index_to_free = be32_to_cpu(mgm->next_gid_index) >> 6;
293 if (amgm_index_to_free) { 269 if (amgm_index_to_free) {
294 err = mthca_READ_MGM(dev, amgm_index_to_free, 270 err = mthca_READ_MGM(dev, amgm_index_to_free,
295 mailbox, &status); 271 mailbox);
296 if (err) 272 if (err) {
297 goto out; 273 mthca_err(dev, "READ_MGM returned %d\n", err);
298 if (status) {
299 mthca_err(dev, "READ_MGM returned status %02x\n",
300 status);
301 err = -EINVAL;
302 goto out; 274 goto out;
303 } 275 }
304 } else 276 } else
305 memset(mgm->gid, 0, 16); 277 memset(mgm->gid, 0, 16);
306 278
307 err = mthca_WRITE_MGM(dev, index, mailbox, &status); 279 err = mthca_WRITE_MGM(dev, index, mailbox);
308 if (err) 280 if (err) {
309 goto out; 281 mthca_err(dev, "WRITE_MGM returned %d\n", err);
310 if (status) {
311 mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
312 err = -EINVAL;
313 goto out; 282 goto out;
314 } 283 }
315 if (amgm_index_to_free) { 284 if (amgm_index_to_free) {
@@ -319,23 +288,17 @@ int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
319 } else { 288 } else {
320 /* Remove entry from AMGM */ 289 /* Remove entry from AMGM */
321 int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6; 290 int curr_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
322 err = mthca_READ_MGM(dev, prev, mailbox, &status); 291 err = mthca_READ_MGM(dev, prev, mailbox);
323 if (err) 292 if (err) {
324 goto out; 293 mthca_err(dev, "READ_MGM returned %d\n", err);
325 if (status) {
326 mthca_err(dev, "READ_MGM returned status %02x\n", status);
327 err = -EINVAL;
328 goto out; 294 goto out;
329 } 295 }
330 296
331 mgm->next_gid_index = cpu_to_be32(curr_next_index << 6); 297 mgm->next_gid_index = cpu_to_be32(curr_next_index << 6);
332 298
333 err = mthca_WRITE_MGM(dev, prev, mailbox, &status); 299 err = mthca_WRITE_MGM(dev, prev, mailbox);
334 if (err) 300 if (err) {
335 goto out; 301 mthca_err(dev, "WRITE_MGM returned %d\n", err);
336 if (status) {
337 mthca_err(dev, "WRITE_MGM returned status %02x\n", status);
338 err = -EINVAL;
339 goto out; 302 goto out;
340 } 303 }
341 BUG_ON(index < dev->limits.num_mgms); 304 BUG_ON(index < dev->limits.num_mgms);
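
The attach/detach paths above chase the AMGM chain through mgm->next_gid_index, which stores the next entry's index shifted left by six bits, so every read pairs with a >> 6 and every write with a << 6. A small sketch of that packing, assuming the low six bits are reserved for other hardware state (the layout itself is not shown in this diff):

#include <stdint.h>
#include <stdio.h>

/* pack an MGM index into the upper bits of a 32-bit field,
 * leaving the low 6 bits free */
static uint32_t pack_next_index(int index)
{
	return (uint32_t)index << 6;
}

static int unpack_next_index(uint32_t field)
{
	return (int)(field >> 6);
}

int main(void)
{
	uint32_t field = pack_next_index(37);

	printf("field=%#x index=%d\n", field, unpack_next_index(field));
	return 0;
}
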
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 8c2a83732b5d..7d2e42dd6926 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -223,7 +223,6 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
223{ 223{
224 int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; 224 int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
225 int ret = 0; 225 int ret = 0;
226 u8 status;
227 226
228 mutex_lock(&table->mutex); 227 mutex_lock(&table->mutex);
229 228
@@ -240,8 +239,8 @@ int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int ob
240 goto out; 239 goto out;
241 } 240 }
242 241
243 if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 242 if (mthca_MAP_ICM(dev, table->icm[i],
244 &status) || status) { 243 table->virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
245 mthca_free_icm(dev, table->icm[i], table->coherent); 244 mthca_free_icm(dev, table->icm[i], table->coherent);
246 table->icm[i] = NULL; 245 table->icm[i] = NULL;
247 ret = -ENOMEM; 246 ret = -ENOMEM;
@@ -258,7 +257,6 @@ out:
258void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) 257void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
259{ 258{
260 int i; 259 int i;
261 u8 status;
262 260
263 if (!mthca_is_memfree(dev)) 261 if (!mthca_is_memfree(dev))
264 return; 262 return;
@@ -269,8 +267,7 @@ void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int o
269 267
270 if (--table->icm[i]->refcount == 0) { 268 if (--table->icm[i]->refcount == 0) {
271 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 269 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
272 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 270 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
273 &status);
274 mthca_free_icm(dev, table->icm[i], table->coherent); 271 mthca_free_icm(dev, table->icm[i], table->coherent);
275 table->icm[i] = NULL; 272 table->icm[i] = NULL;
276 } 273 }
@@ -366,7 +363,6 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
366 int num_icm; 363 int num_icm;
367 unsigned chunk_size; 364 unsigned chunk_size;
368 int i; 365 int i;
369 u8 status;
370 366
371 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size; 367 obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
372 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk); 368 num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);
@@ -396,8 +392,8 @@ struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
396 __GFP_NOWARN, use_coherent); 392 __GFP_NOWARN, use_coherent);
397 if (!table->icm[i]) 393 if (!table->icm[i])
398 goto err; 394 goto err;
399 if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE, 395 if (mthca_MAP_ICM(dev, table->icm[i],
400 &status) || status) { 396 virt + i * MTHCA_TABLE_CHUNK_SIZE)) {
401 mthca_free_icm(dev, table->icm[i], table->coherent); 397 mthca_free_icm(dev, table->icm[i], table->coherent);
402 table->icm[i] = NULL; 398 table->icm[i] = NULL;
403 goto err; 399 goto err;
@@ -416,8 +412,7 @@ err:
416 for (i = 0; i < num_icm; ++i) 412 for (i = 0; i < num_icm; ++i)
417 if (table->icm[i]) { 413 if (table->icm[i]) {
418 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, 414 mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
419 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 415 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
420 &status);
421 mthca_free_icm(dev, table->icm[i], table->coherent); 416 mthca_free_icm(dev, table->icm[i], table->coherent);
422 } 417 }
423 418
@@ -429,13 +424,12 @@ err:
429void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) 424void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
430{ 425{
431 int i; 426 int i;
432 u8 status;
433 427
434 for (i = 0; i < table->num_icm; ++i) 428 for (i = 0; i < table->num_icm; ++i)
435 if (table->icm[i]) { 429 if (table->icm[i]) {
436 mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, 430 mthca_UNMAP_ICM(dev,
437 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE, 431 table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
438 &status); 432 MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE);
439 mthca_free_icm(dev, table->icm[i], table->coherent); 433 mthca_free_icm(dev, table->icm[i], table->coherent);
440 } 434 }
441 435
@@ -454,7 +448,6 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
454{ 448{
455 struct page *pages[1]; 449 struct page *pages[1];
456 int ret = 0; 450 int ret = 0;
457 u8 status;
458 int i; 451 int i;
459 452
460 if (!mthca_is_memfree(dev)) 453 if (!mthca_is_memfree(dev))
@@ -494,9 +487,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
494 } 487 }
495 488
496 ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), 489 ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
497 mthca_uarc_virt(dev, uar, i), &status); 490 mthca_uarc_virt(dev, uar, i));
498 if (!ret && status)
499 ret = -EINVAL;
500 if (ret) { 491 if (ret) {
501 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 492 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
502 put_page(sg_page(&db_tab->page[i].mem)); 493 put_page(sg_page(&db_tab->page[i].mem));
@@ -557,14 +548,13 @@ void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
557 struct mthca_user_db_table *db_tab) 548 struct mthca_user_db_table *db_tab)
558{ 549{
559 int i; 550 int i;
560 u8 status;
561 551
562 if (!mthca_is_memfree(dev)) 552 if (!mthca_is_memfree(dev))
563 return; 553 return;
564 554
565 for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) { 555 for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
566 if (db_tab->page[i].uvirt) { 556 if (db_tab->page[i].uvirt) {
567 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); 557 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1);
568 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); 558 pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
569 put_page(sg_page(&db_tab->page[i].mem)); 559 put_page(sg_page(&db_tab->page[i].mem));
570 } 560 }
@@ -581,7 +571,6 @@ int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
581 int i, j; 571 int i, j;
582 struct mthca_db_page *page; 572 struct mthca_db_page *page;
583 int ret = 0; 573 int ret = 0;
584 u8 status;
585 574
586 mutex_lock(&dev->db_tab->mutex); 575 mutex_lock(&dev->db_tab->mutex);
587 576
@@ -644,9 +633,7 @@ alloc:
644 memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE); 633 memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);
645 634
646 ret = mthca_MAP_ICM_page(dev, page->mapping, 635 ret = mthca_MAP_ICM_page(dev, page->mapping,
647 mthca_uarc_virt(dev, &dev->driver_uar, i), &status); 636 mthca_uarc_virt(dev, &dev->driver_uar, i));
648 if (!ret && status)
649 ret = -EINVAL;
650 if (ret) { 637 if (ret) {
651 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 638 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
652 page->db_rec, page->mapping); 639 page->db_rec, page->mapping);
@@ -678,7 +665,6 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
678{ 665{
679 int i, j; 666 int i, j;
680 struct mthca_db_page *page; 667 struct mthca_db_page *page;
681 u8 status;
682 668
683 i = db_index / MTHCA_DB_REC_PER_PAGE; 669 i = db_index / MTHCA_DB_REC_PER_PAGE;
684 j = db_index % MTHCA_DB_REC_PER_PAGE; 670 j = db_index % MTHCA_DB_REC_PER_PAGE;
@@ -694,7 +680,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
694 680
695 if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && 681 if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
696 i >= dev->db_tab->max_group1 - 1) { 682 i >= dev->db_tab->max_group1 - 1) {
697 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 683 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
698 684
699 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 685 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
700 page->db_rec, page->mapping); 686 page->db_rec, page->mapping);
@@ -745,7 +731,6 @@ int mthca_init_db_tab(struct mthca_dev *dev)
745void mthca_cleanup_db_tab(struct mthca_dev *dev) 731void mthca_cleanup_db_tab(struct mthca_dev *dev)
746{ 732{
747 int i; 733 int i;
748 u8 status;
749 734
750 if (!mthca_is_memfree(dev)) 735 if (!mthca_is_memfree(dev))
751 return; 736 return;
@@ -763,7 +748,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
763 if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) 748 if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
764 mthca_warn(dev, "Kernel UARC page %d not empty\n", i); 749 mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
765 750
766 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); 751 mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1);
767 752
768 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE, 753 dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
769 dev->db_tab->page[i].db_rec, 754 dev->db_tab->page[i].db_rec,
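
mthca_table_get() and mthca_table_put() above manage ICM in fixed-size chunks: the object index is masked to the table size, scaled by the object size, and divided by the chunk size to find the chunk whose mapping and refcount apply. The arithmetic in isolation, with invented sizes:

#include <stdio.h>

#define TABLE_CHUNK_SIZE (1 << 18)	/* assumed 256 KiB chunks */

/* which chunk of the ICM table holds object `obj`?
 * num_obj must be a power of two for the mask to be valid */
static int chunk_index(int obj, int num_obj, int obj_size)
{
	return (obj & (num_obj - 1)) * obj_size / TABLE_CHUNK_SIZE;
}

int main(void)
{
	/* e.g. 64K objects of 128 bytes -> 2048 objects per chunk */
	printf("obj 0    -> chunk %d\n", chunk_index(0, 65536, 128));
	printf("obj 2048 -> chunk %d\n", chunk_index(2048, 65536, 128));
	printf("obj 5000 -> chunk %d\n", chunk_index(5000, 65536, 128));
	return 0;
}
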
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 44045c8846db..ab876f928a1b 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -257,7 +257,6 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
257 struct mthca_mailbox *mailbox; 257 struct mthca_mailbox *mailbox;
258 __be64 *mtt_entry; 258 __be64 *mtt_entry;
259 int err = 0; 259 int err = 0;
260 u8 status;
261 int i; 260 int i;
262 261
263 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 262 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -281,17 +280,11 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
281 if (i & 1) 280 if (i & 1)
282 mtt_entry[i + 2] = 0; 281 mtt_entry[i + 2] = 0;
283 282
284 err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); 283 err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1);
285 if (err) { 284 if (err) {
286 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err); 285 mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
287 goto out; 286 goto out;
288 } 287 }
289 if (status) {
290 mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
291 status);
292 err = -EINVAL;
293 goto out;
294 }
295 288
296 list_len -= i; 289 list_len -= i;
297 start_index += i; 290 start_index += i;
@@ -441,7 +434,6 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
441 u32 key; 434 u32 key;
442 int i; 435 int i;
443 int err; 436 int err;
444 u8 status;
445 437
446 WARN_ON(buffer_size_shift >= 32); 438 WARN_ON(buffer_size_shift >= 32);
447 439
@@ -497,16 +489,10 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
497 } 489 }
498 490
499 err = mthca_SW2HW_MPT(dev, mailbox, 491 err = mthca_SW2HW_MPT(dev, mailbox,
500 key & (dev->limits.num_mpts - 1), 492 key & (dev->limits.num_mpts - 1));
501 &status);
502 if (err) { 493 if (err) {
503 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); 494 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
504 goto err_out_mailbox; 495 goto err_out_mailbox;
505 } else if (status) {
506 mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
507 status);
508 err = -EINVAL;
509 goto err_out_mailbox;
510 } 496 }
511 497
512 mthca_free_mailbox(dev, mailbox); 498 mthca_free_mailbox(dev, mailbox);
@@ -567,17 +553,12 @@ static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
567void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) 553void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
568{ 554{
569 int err; 555 int err;
570 u8 status;
571 556
572 err = mthca_HW2SW_MPT(dev, NULL, 557 err = mthca_HW2SW_MPT(dev, NULL,
573 key_to_hw_index(dev, mr->ibmr.lkey) & 558 key_to_hw_index(dev, mr->ibmr.lkey) &
574 (dev->limits.num_mpts - 1), 559 (dev->limits.num_mpts - 1));
575 &status);
576 if (err) 560 if (err)
577 mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err); 561 mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
578 else if (status)
579 mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
580 status);
581 562
582 mthca_free_region(dev, mr->ibmr.lkey); 563 mthca_free_region(dev, mr->ibmr.lkey);
583 mthca_free_mtt(dev, mr->mtt); 564 mthca_free_mtt(dev, mr->mtt);
@@ -590,7 +571,6 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
590 struct mthca_mailbox *mailbox; 571 struct mthca_mailbox *mailbox;
591 u64 mtt_seg; 572 u64 mtt_seg;
592 u32 key, idx; 573 u32 key, idx;
593 u8 status;
594 int list_len = mr->attr.max_pages; 574 int list_len = mr->attr.max_pages;
595 int err = -ENOMEM; 575 int err = -ENOMEM;
596 int i; 576 int i;
@@ -672,18 +652,11 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
672 } 652 }
673 653
674 err = mthca_SW2HW_MPT(dev, mailbox, 654 err = mthca_SW2HW_MPT(dev, mailbox,
675 key & (dev->limits.num_mpts - 1), 655 key & (dev->limits.num_mpts - 1));
676 &status);
677 if (err) { 656 if (err) {
678 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err); 657 mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
679 goto err_out_mailbox_free; 658 goto err_out_mailbox_free;
680 } 659 }
681 if (status) {
682 mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
683 status);
684 err = -EINVAL;
685 goto err_out_mailbox_free;
686 }
687 660
688 mthca_free_mailbox(dev, mailbox); 661 mthca_free_mailbox(dev, mailbox);
689 return 0; 662 return 0;
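
In __mthca_write_mtt() above, MTT entries are posted in aligned pairs: an odd tail gets a zeroed padding entry (mtt_entry[i + 2] = 0 when i & 1) and the count passed to mthca_WRITE_MTT() is rounded up with (i + 1) & ~1. The rounding by itself:

#include <stdio.h>

/* round an entry count up to the next even number: odd counts get a
 * zeroed padding entry so writes always cover aligned pairs */
static int even_count(int i)
{
	return (i + 1) & ~1;
}

int main(void)
{
	for (int i = 1; i <= 6; ++i)
		printf("%d entr%s -> WRITE_MTT count %d\n",
		       i, i == 1 ? "y" : "ies", even_count(i));
	return 0;
}
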
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 1e0b4b6074ad..365fe0e14192 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -63,8 +63,6 @@ static int mthca_query_device(struct ib_device *ibdev,
63 int err = -ENOMEM; 63 int err = -ENOMEM;
64 struct mthca_dev *mdev = to_mdev(ibdev); 64 struct mthca_dev *mdev = to_mdev(ibdev);
65 65
66 u8 status;
67
68 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 66 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
69 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 67 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
70 if (!in_mad || !out_mad) 68 if (!in_mad || !out_mad)
@@ -78,14 +76,9 @@ static int mthca_query_device(struct ib_device *ibdev,
78 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 76 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
79 77
80 err = mthca_MAD_IFC(mdev, 1, 1, 78 err = mthca_MAD_IFC(mdev, 1, 1,
81 1, NULL, NULL, in_mad, out_mad, 79 1, NULL, NULL, in_mad, out_mad);
82 &status);
83 if (err) 80 if (err)
84 goto out; 81 goto out;
85 if (status) {
86 err = -EINVAL;
87 goto out;
88 }
89 82
90 props->device_cap_flags = mdev->device_cap_flags; 83 props->device_cap_flags = mdev->device_cap_flags;
91 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) & 84 props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
@@ -141,7 +134,6 @@ static int mthca_query_port(struct ib_device *ibdev,
141 struct ib_smp *in_mad = NULL; 134 struct ib_smp *in_mad = NULL;
142 struct ib_smp *out_mad = NULL; 135 struct ib_smp *out_mad = NULL;
143 int err = -ENOMEM; 136 int err = -ENOMEM;
144 u8 status;
145 137
146 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 138 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
147 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 139 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -155,14 +147,9 @@ static int mthca_query_port(struct ib_device *ibdev,
155 in_mad->attr_mod = cpu_to_be32(port); 147 in_mad->attr_mod = cpu_to_be32(port);
156 148
157 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 149 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
158 port, NULL, NULL, in_mad, out_mad, 150 port, NULL, NULL, in_mad, out_mad);
159 &status);
160 if (err) 151 if (err)
161 goto out; 152 goto out;
162 if (status) {
163 err = -EINVAL;
164 goto out;
165 }
166 153
167 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16)); 154 props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
168 props->lmc = out_mad->data[34] & 0x7; 155 props->lmc = out_mad->data[34] & 0x7;
@@ -214,7 +201,6 @@ static int mthca_modify_port(struct ib_device *ibdev,
214 struct mthca_set_ib_param set_ib; 201 struct mthca_set_ib_param set_ib;
215 struct ib_port_attr attr; 202 struct ib_port_attr attr;
216 int err; 203 int err;
217 u8 status;
218 204
219 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) 205 if (mutex_lock_interruptible(&to_mdev(ibdev)->cap_mask_mutex))
220 return -ERESTARTSYS; 206 return -ERESTARTSYS;
@@ -229,14 +215,9 @@ static int mthca_modify_port(struct ib_device *ibdev,
229 set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) & 215 set_ib.cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
230 ~props->clr_port_cap_mask; 216 ~props->clr_port_cap_mask;
231 217
232 err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); 218 err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port);
233 if (err) 219 if (err)
234 goto out; 220 goto out;
235 if (status) {
236 err = -EINVAL;
237 goto out;
238 }
239
240out: 221out:
241 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex); 222 mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
242 return err; 223 return err;
@@ -248,7 +229,6 @@ static int mthca_query_pkey(struct ib_device *ibdev,
248 struct ib_smp *in_mad = NULL; 229 struct ib_smp *in_mad = NULL;
249 struct ib_smp *out_mad = NULL; 230 struct ib_smp *out_mad = NULL;
250 int err = -ENOMEM; 231 int err = -ENOMEM;
251 u8 status;
252 232
253 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 233 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
254 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 234 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -260,14 +240,9 @@ static int mthca_query_pkey(struct ib_device *ibdev,
260 in_mad->attr_mod = cpu_to_be32(index / 32); 240 in_mad->attr_mod = cpu_to_be32(index / 32);
261 241
262 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 242 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
263 port, NULL, NULL, in_mad, out_mad, 243 port, NULL, NULL, in_mad, out_mad);
264 &status);
265 if (err) 244 if (err)
266 goto out; 245 goto out;
267 if (status) {
268 err = -EINVAL;
269 goto out;
270 }
271 246
272 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]); 247 *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);
273 248
@@ -283,7 +258,6 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
283 struct ib_smp *in_mad = NULL; 258 struct ib_smp *in_mad = NULL;
284 struct ib_smp *out_mad = NULL; 259 struct ib_smp *out_mad = NULL;
285 int err = -ENOMEM; 260 int err = -ENOMEM;
286 u8 status;
287 261
288 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 262 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
289 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 263 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -295,14 +269,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
295 in_mad->attr_mod = cpu_to_be32(port); 269 in_mad->attr_mod = cpu_to_be32(port);
296 270
297 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 271 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
298 port, NULL, NULL, in_mad, out_mad, 272 port, NULL, NULL, in_mad, out_mad);
299 &status);
300 if (err) 273 if (err)
301 goto out; 274 goto out;
302 if (status) {
303 err = -EINVAL;
304 goto out;
305 }
306 275
307 memcpy(gid->raw, out_mad->data + 8, 8); 276 memcpy(gid->raw, out_mad->data + 8, 8);
308 277
@@ -311,14 +280,9 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
311 in_mad->attr_mod = cpu_to_be32(index / 8); 280 in_mad->attr_mod = cpu_to_be32(index / 8);
312 281
313 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, 282 err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1,
314 port, NULL, NULL, in_mad, out_mad, 283 port, NULL, NULL, in_mad, out_mad);
315 &status);
316 if (err) 284 if (err)
317 goto out; 285 goto out;
318 if (status) {
319 err = -EINVAL;
320 goto out;
321 }
322 286
323 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8); 287 memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);
324 288
@@ -800,7 +764,6 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
800 struct mthca_cq *cq = to_mcq(ibcq); 764 struct mthca_cq *cq = to_mcq(ibcq);
801 struct mthca_resize_cq ucmd; 765 struct mthca_resize_cq ucmd;
802 u32 lkey; 766 u32 lkey;
803 u8 status;
804 int ret; 767 int ret;
805 768
806 if (entries < 1 || entries > dev->limits.max_cqes) 769 if (entries < 1 || entries > dev->limits.max_cqes)
@@ -827,9 +790,7 @@ static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *uda
827 lkey = ucmd.lkey; 790 lkey = ucmd.lkey;
828 } 791 }
829 792
830 ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries), &status); 793 ret = mthca_RESIZE_CQ(dev, cq->cqn, lkey, ilog2(entries));
831 if (status)
832 ret = -EINVAL;
833 794
834 if (ret) { 795 if (ret) {
835 if (cq->resize_buf) { 796 if (cq->resize_buf) {
@@ -1161,7 +1122,6 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
1161{ 1122{
1162 struct ib_fmr *fmr; 1123 struct ib_fmr *fmr;
1163 int err; 1124 int err;
1164 u8 status;
1165 struct mthca_dev *mdev = NULL; 1125 struct mthca_dev *mdev = NULL;
1166 1126
1167 list_for_each_entry(fmr, fmr_list, list) { 1127 list_for_each_entry(fmr, fmr_list, list) {
@@ -1182,12 +1142,8 @@ static int mthca_unmap_fmr(struct list_head *fmr_list)
1182 list_for_each_entry(fmr, fmr_list, list) 1142 list_for_each_entry(fmr, fmr_list, list)
1183 mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); 1143 mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr));
1184 1144
1185 err = mthca_SYNC_TPT(mdev, &status); 1145 err = mthca_SYNC_TPT(mdev);
1186 if (err) 1146 return err;
1187 return err;
1188 if (status)
1189 return -EINVAL;
1190 return 0;
1191} 1147}
1192 1148
1193static ssize_t show_rev(struct device *device, struct device_attribute *attr, 1149static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -1253,7 +1209,6 @@ static int mthca_init_node_data(struct mthca_dev *dev)
1253 struct ib_smp *in_mad = NULL; 1209 struct ib_smp *in_mad = NULL;
1254 struct ib_smp *out_mad = NULL; 1210 struct ib_smp *out_mad = NULL;
1255 int err = -ENOMEM; 1211 int err = -ENOMEM;
1256 u8 status;
1257 1212
1258 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); 1213 in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
1259 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); 1214 out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
@@ -1264,28 +1219,18 @@ static int mthca_init_node_data(struct mthca_dev *dev)
1264 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; 1219 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
1265 1220
1266 err = mthca_MAD_IFC(dev, 1, 1, 1221 err = mthca_MAD_IFC(dev, 1, 1,
1267 1, NULL, NULL, in_mad, out_mad, 1222 1, NULL, NULL, in_mad, out_mad);
1268 &status);
1269 if (err) 1223 if (err)
1270 goto out; 1224 goto out;
1271 if (status) {
1272 err = -EINVAL;
1273 goto out;
1274 }
1275 1225
1276 memcpy(dev->ib_dev.node_desc, out_mad->data, 64); 1226 memcpy(dev->ib_dev.node_desc, out_mad->data, 64);
1277 1227
1278 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; 1228 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
1279 1229
1280 err = mthca_MAD_IFC(dev, 1, 1, 1230 err = mthca_MAD_IFC(dev, 1, 1,
1281 1, NULL, NULL, in_mad, out_mad, 1231 1, NULL, NULL, in_mad, out_mad);
1282 &status);
1283 if (err) 1232 if (err)
1284 goto out; 1233 goto out;
1285 if (status) {
1286 err = -EINVAL;
1287 goto out;
1288 }
1289 1234
1290 if (mthca_is_memfree(dev)) 1235 if (mthca_is_memfree(dev))
1291 dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32)); 1236 dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
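
mthca_query_gid() above builds the 16-byte GID from two MAD queries: the port info reply supplies the 8-byte subnet prefix at out_mad->data + 8, and the GUID info table supplies the low 8 bytes, eight GUIDs per reply, hence the (index % 8) * 8 offset. The assembly step, with placeholder values standing in for the MAD reply data:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t gid[16];
	/* placeholder prefix/GUID; real values come from the MAD replies */
	const uint8_t prefix[8] = { 0xfe, 0x80, 0, 0, 0, 0, 0, 0 };
	const uint8_t guid[8]   = { 0x00, 0x02, 0xc9, 0x02, 0, 0, 0, 0x01 };

	memcpy(gid, prefix, 8);		/* upper half: subnet prefix */
	memcpy(gid + 8, guid, 8);	/* lower half: port GUID */

	for (int i = 0; i < 16; ++i)
		printf("%02x%s", gid[i], i == 15 ? "\n" : i % 2 ? ":" : "");
	return 0;
}
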
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index a34c9d38e822..9601049e14d0 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -308,7 +308,6 @@ static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
308static void init_port(struct mthca_dev *dev, int port) 308static void init_port(struct mthca_dev *dev, int port)
309{ 309{
310 int err; 310 int err;
311 u8 status;
312 struct mthca_init_ib_param param; 311 struct mthca_init_ib_param param;
313 312
314 memset(&param, 0, sizeof param); 313 memset(&param, 0, sizeof param);
@@ -319,11 +318,9 @@ static void init_port(struct mthca_dev *dev, int port)
319 param.gid_cap = dev->limits.gid_table_len; 318 param.gid_cap = dev->limits.gid_table_len;
320 param.pkey_cap = dev->limits.pkey_table_len; 319 param.pkey_cap = dev->limits.pkey_table_len;
321 320
322 err = mthca_INIT_IB(dev, &param, port, &status); 321 err = mthca_INIT_IB(dev, &param, port);
323 if (err) 322 if (err)
324 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err); 323 mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
325 if (status)
326 mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
327} 324}
328 325
329static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr, 326static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
@@ -433,7 +430,6 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
433 struct mthca_qp_param *qp_param; 430 struct mthca_qp_param *qp_param;
434 struct mthca_qp_context *context; 431 struct mthca_qp_context *context;
435 int mthca_state; 432 int mthca_state;
436 u8 status;
437 433
438 mutex_lock(&qp->mutex); 434 mutex_lock(&qp->mutex);
439 435
@@ -448,12 +444,9 @@ int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_m
448 goto out; 444 goto out;
449 } 445 }
450 446
451 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox, &status); 447 err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
452 if (err) 448 if (err) {
453 goto out_mailbox; 449 mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
454 if (status) {
455 mthca_warn(dev, "QUERY_QP returned status %02x\n", status);
456 err = -EINVAL;
457 goto out_mailbox; 450 goto out_mailbox;
458 } 451 }
459 452
@@ -555,7 +548,6 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
555 struct mthca_qp_param *qp_param; 548 struct mthca_qp_param *qp_param;
556 struct mthca_qp_context *qp_context; 549 struct mthca_qp_context *qp_context;
557 u32 sqd_event = 0; 550 u32 sqd_event = 0;
558 u8 status;
559 int err = -EINVAL; 551 int err = -EINVAL;
560 552
561 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 553 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -781,13 +773,10 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
781 sqd_event = 1 << 31; 773 sqd_event = 1 << 31;
782 774
783 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0, 775 err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
784 mailbox, sqd_event, &status); 776 mailbox, sqd_event);
785 if (err) 777 if (err) {
786 goto out_mailbox; 778 mthca_warn(dev, "modify QP %d->%d returned %d.\n",
787 if (status) { 779 cur_state, new_state, err);
788 mthca_warn(dev, "modify QP %d->%d returned status %02x.\n",
789 cur_state, new_state, status);
790 err = -EINVAL;
791 goto out_mailbox; 780 goto out_mailbox;
792 } 781 }
793 782
@@ -817,7 +806,7 @@ static int __mthca_modify_qp(struct ib_qp *ibqp,
817 cur_state != IB_QPS_ERR && 806 cur_state != IB_QPS_ERR &&
818 (new_state == IB_QPS_RESET || 807 (new_state == IB_QPS_RESET ||
819 new_state == IB_QPS_ERR)) 808 new_state == IB_QPS_ERR))
820 mthca_CLOSE_IB(dev, qp->port, &status); 809 mthca_CLOSE_IB(dev, qp->port);
821 } 810 }
822 811
823 /* 812 /*
@@ -1429,7 +1418,6 @@ static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
1429void mthca_free_qp(struct mthca_dev *dev, 1418void mthca_free_qp(struct mthca_dev *dev,
1430 struct mthca_qp *qp) 1419 struct mthca_qp *qp)
1431{ 1420{
1432 u8 status;
1433 struct mthca_cq *send_cq; 1421 struct mthca_cq *send_cq;
1434 struct mthca_cq *recv_cq; 1422 struct mthca_cq *recv_cq;
1435 1423
@@ -1454,7 +1442,7 @@ void mthca_free_qp(struct mthca_dev *dev,
1454 1442
1455 if (qp->state != IB_QPS_RESET) 1443 if (qp->state != IB_QPS_RESET)
1456 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0, 1444 mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
1457 NULL, 0, &status); 1445 NULL, 0);
1458 1446
1459 /* 1447 /*
1460 * If this is a userspace QP, the buffers, MR, CQs and so on 1448 * If this is a userspace QP, the buffers, MR, CQs and so on
@@ -2263,7 +2251,6 @@ void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
2263int mthca_init_qp_table(struct mthca_dev *dev) 2251int mthca_init_qp_table(struct mthca_dev *dev)
2264{ 2252{
2265 int err; 2253 int err;
2266 u8 status;
2267 int i; 2254 int i;
2268 2255
2269 spin_lock_init(&dev->qp_table.lock); 2256 spin_lock_init(&dev->qp_table.lock);
@@ -2290,15 +2277,10 @@ int mthca_init_qp_table(struct mthca_dev *dev)
2290 2277
2291 for (i = 0; i < 2; ++i) { 2278 for (i = 0; i < 2; ++i) {
2292 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI, 2279 err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
2293 dev->qp_table.sqp_start + i * 2, 2280 dev->qp_table.sqp_start + i * 2);
2294 &status); 2281 if (err) {
2295 if (err)
2296 goto err_out;
2297 if (status) {
2298 mthca_warn(dev, "CONF_SPECIAL_QP returned " 2282 mthca_warn(dev, "CONF_SPECIAL_QP returned "
2299 "status %02x, aborting.\n", 2283 "%d, aborting.\n", err);
2300 status);
2301 err = -EINVAL;
2302 goto err_out; 2284 goto err_out;
2303 } 2285 }
2304 } 2286 }
@@ -2306,7 +2288,7 @@ int mthca_init_qp_table(struct mthca_dev *dev)
2306 2288
2307 err_out: 2289 err_out:
2308 for (i = 0; i < 2; ++i) 2290 for (i = 0; i < 2; ++i)
2309 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2291 mthca_CONF_SPECIAL_QP(dev, i, 0);
2310 2292
2311 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2293 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2312 mthca_alloc_cleanup(&dev->qp_table.alloc); 2294 mthca_alloc_cleanup(&dev->qp_table.alloc);
@@ -2317,10 +2299,9 @@ int mthca_init_qp_table(struct mthca_dev *dev)
2317void mthca_cleanup_qp_table(struct mthca_dev *dev) 2299void mthca_cleanup_qp_table(struct mthca_dev *dev)
2318{ 2300{
2319 int i; 2301 int i;
2320 u8 status;
2321 2302
2322 for (i = 0; i < 2; ++i) 2303 for (i = 0; i < 2; ++i)
2323 mthca_CONF_SPECIAL_QP(dev, i, 0, &status); 2304 mthca_CONF_SPECIAL_QP(dev, i, 0);
2324 2305
2325 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); 2306 mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
2326 mthca_alloc_cleanup(&dev->qp_table.alloc); 2307 mthca_alloc_cleanup(&dev->qp_table.alloc);
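
The init and cleanup hunks above configure the two special QP types in a two-iteration loop and disable them with the same command issued with QPN 0, both in mthca_cleanup_qp_table() and in the error path of mthca_init_qp_table(). A sketch of that symmetry, with an assumed base QPN (the real sqp_start is allocated during table init):

#include <stdio.h>

enum { QPT_SMI, QPT_GSI };

/* qpn != 0 configures the special QP, qpn == 0 disables it */
static void conf_special_qp(int type, unsigned qpn)
{
	if (qpn)
		printf("enable %s at QPN %u\n", type ? "GSI" : "SMI", qpn);
	else
		printf("disable special QP type %d\n", type);
}

int main(void)
{
	unsigned sqp_start = 0x80;	/* assumed base, for illustration */
	int i;

	for (i = 0; i < 2; ++i)	/* init: SMI then GSI, two QPNs apart */
		conf_special_qp(i ? QPT_GSI : QPT_SMI, sqp_start + i * 2);
	for (i = 0; i < 2; ++i)	/* cleanup, and the init error path */
		conf_special_qp(i, 0);
	return 0;
}
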
diff --git a/drivers/infiniband/hw/mthca/mthca_reset.c b/drivers/infiniband/hw/mthca/mthca_reset.c
index 2a13a163d337..4fa3534ec233 100644
--- a/drivers/infiniband/hw/mthca/mthca_reset.c
+++ b/drivers/infiniband/hw/mthca/mthca_reset.c
@@ -113,7 +113,7 @@ int mthca_reset(struct mthca_dev *mdev)
113 } 113 }
114 114
115 hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX); 115 hca_pcix_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_PCIX);
116 hca_pcie_cap = pci_find_capability(mdev->pdev, PCI_CAP_ID_EXP); 116 hca_pcie_cap = pci_pcie_cap(mdev->pdev);
117 117
118 if (bridge) { 118 if (bridge) {
119 bridge_header = kmalloc(256, GFP_KERNEL); 119 bridge_header = kmalloc(256, GFP_KERNEL);
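
The one-line change above swaps a capability-list walk, pci_find_capability(pdev, PCI_CAP_ID_EXP), for pci_pcie_cap(), which returns the PCIe capability offset the PCI core already cached at probe time. A toy model of the caching idea, not the PCI core's code:

#include <stdio.h>

struct toy_dev {
	int pcie_cap;	/* cached offset, 0 if the capability is absent */
};

static int find_capability_slow(void)
{
	return 0x60;	/* pretend we walked config space and found it here */
}

static int toy_pcie_cap(struct toy_dev *dev)
{
	return dev->pcie_cap;	/* no config-space walk after probe */
}

int main(void)
{
	struct toy_dev dev = { .pcie_cap = find_capability_slow() };

	printf("PCIe capability at %#x\n", toy_pcie_cap(&dev));
	return 0;
}
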
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 4fabe62aab8a..d22f970480c0 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -200,7 +200,6 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
200 struct ib_srq_attr *attr, struct mthca_srq *srq) 200 struct ib_srq_attr *attr, struct mthca_srq *srq)
201{ 201{
202 struct mthca_mailbox *mailbox; 202 struct mthca_mailbox *mailbox;
203 u8 status;
204 int ds; 203 int ds;
205 int err; 204 int err;
206 205
@@ -266,18 +265,12 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
266 else 265 else
267 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); 266 mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);
268 267
269 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); 268 err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);
270 269
271 if (err) { 270 if (err) {
272 mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err); 271 mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
273 goto err_out_free_buf; 272 goto err_out_free_buf;
274 } 273 }
275 if (status) {
276 mthca_warn(dev, "SW2HW_SRQ returned status 0x%02x\n",
277 status);
278 err = -EINVAL;
279 goto err_out_free_buf;
280 }
281 274
282 spin_lock_irq(&dev->srq_table.lock); 275 spin_lock_irq(&dev->srq_table.lock);
283 if (mthca_array_set(&dev->srq_table.srq, 276 if (mthca_array_set(&dev->srq_table.srq,
@@ -299,11 +292,9 @@ int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
299 return 0; 292 return 0;
300 293
301err_out_free_srq: 294err_out_free_srq:
302 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 295 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
303 if (err) 296 if (err)
304 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 297 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
305 else if (status)
306 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
307 298
308err_out_free_buf: 299err_out_free_buf:
309 if (!pd->ibpd.uobject) 300 if (!pd->ibpd.uobject)
@@ -340,7 +331,6 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
340{ 331{
341 struct mthca_mailbox *mailbox; 332 struct mthca_mailbox *mailbox;
342 int err; 333 int err;
343 u8 status;
344 334
345 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 335 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
346 if (IS_ERR(mailbox)) { 336 if (IS_ERR(mailbox)) {
@@ -348,11 +338,9 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
348 return; 338 return;
349 } 339 }
350 340
351 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); 341 err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
352 if (err) 342 if (err)
353 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err); 343 mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);
354 else if (status)
355 mthca_warn(dev, "HW2SW_SRQ returned status 0x%02x\n", status);
356 344
357 spin_lock_irq(&dev->srq_table.lock); 345 spin_lock_irq(&dev->srq_table.lock);
358 mthca_array_clear(&dev->srq_table.srq, 346 mthca_array_clear(&dev->srq_table.srq,
@@ -378,8 +366,7 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
378{ 366{
379 struct mthca_dev *dev = to_mdev(ibsrq->device); 367 struct mthca_dev *dev = to_mdev(ibsrq->device);
380 struct mthca_srq *srq = to_msrq(ibsrq); 368 struct mthca_srq *srq = to_msrq(ibsrq);
381 int ret; 369 int ret = 0;
382 u8 status;
383 370
384 /* We don't support resizing SRQs (yet?) */ 371 /* We don't support resizing SRQs (yet?) */
385 if (attr_mask & IB_SRQ_MAX_WR) 372 if (attr_mask & IB_SRQ_MAX_WR)
@@ -391,16 +378,11 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
391 return -EINVAL; 378 return -EINVAL;
392 379
393 mutex_lock(&srq->mutex); 380 mutex_lock(&srq->mutex);
394 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status); 381 ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
395 mutex_unlock(&srq->mutex); 382 mutex_unlock(&srq->mutex);
396
397 if (ret)
398 return ret;
399 if (status)
400 return -EINVAL;
401 } 383 }
402 384
403 return 0; 385 return ret;
404} 386}
405 387
406int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) 388int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
@@ -410,14 +392,13 @@ int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
410 struct mthca_mailbox *mailbox; 392 struct mthca_mailbox *mailbox;
411 struct mthca_arbel_srq_context *arbel_ctx; 393 struct mthca_arbel_srq_context *arbel_ctx;
412 struct mthca_tavor_srq_context *tavor_ctx; 394 struct mthca_tavor_srq_context *tavor_ctx;
413 u8 status;
414 int err; 395 int err;
415 396
416 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); 397 mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
417 if (IS_ERR(mailbox)) 398 if (IS_ERR(mailbox))
418 return PTR_ERR(mailbox); 399 return PTR_ERR(mailbox);
419 400
420 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status); 401 err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
421 if (err) 402 if (err)
422 goto out; 403 goto out;
423 404
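
mthca_modify_srq() above now initializes ret to 0 and returns whatever mthca_ARM_SRQ() reports, holding the SRQ mutex only around the arm operation. A pthread-based sketch of that shape; the types here are stand-ins, not the driver's:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct srq {
	pthread_mutex_t mutex;
	unsigned limit;
};

/* stand-in for the ARM_SRQ firmware command */
static int arm_srq(struct srq *srq, unsigned limit)
{
	srq->limit = limit;
	return 0;	/* a firmware failure would be a negative errno */
}

static int modify_srq(struct srq *srq, unsigned limit)
{
	int ret = 0;

	if (limit) {	/* only arming the limit needs the lock */
		pthread_mutex_lock(&srq->mutex);
		ret = arm_srq(srq, limit);
		pthread_mutex_unlock(&srq->mutex);
	}
	return ret;	/* no separate status value left to translate */
}

int main(void)
{
	struct srq s = { PTHREAD_MUTEX_INITIALIZER, 0 };

	printf("modify_srq -> %d, limit now %u\n", modify_srq(&s, 16), s.limit);
	return 0;
}
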
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 95ca93ceedac..9f2f7d4b1197 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -605,16 +605,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
605 605
606 606
607/** 607/**
608 * nes_modify_port
609 */
610static int nes_modify_port(struct ib_device *ibdev, u8 port,
611 int port_modify_mask, struct ib_port_modify *props)
612{
613 return 0;
614}
615
616
617/**
618 * nes_query_pkey 608 * nes_query_pkey
619 */ 609 */
620static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) 610static int nes_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
@@ -3882,7 +3872,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev)
3882 nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; 3872 nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev;
3883 nesibdev->ibdev.query_device = nes_query_device; 3873 nesibdev->ibdev.query_device = nes_query_device;
3884 nesibdev->ibdev.query_port = nes_query_port; 3874 nesibdev->ibdev.query_port = nes_query_port;
3885 nesibdev->ibdev.modify_port = nes_modify_port;
3886 nesibdev->ibdev.query_pkey = nes_query_pkey; 3875 nesibdev->ibdev.query_pkey = nes_query_pkey;
3887 nesibdev->ibdev.query_gid = nes_query_gid; 3876 nesibdev->ibdev.query_gid = nes_query_gid;
3888 nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext; 3877 nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext;
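
Deleting the empty nes_modify_port() stub and its method-table entry is only safe if the verbs core treats a NULL .modify_port as a successful no-op instead of dereferencing it. A sketch of that dispatch convention, with invented names:

#include <stdio.h>

/* invented method table: NULL means "driver has nothing to do" */
struct port_ops {
	int (*modify_port)(int port, int mask);
};

static int dispatch_modify_port(const struct port_ops *ops,
				int port, int mask)
{
	if (!ops->modify_port)
		return 0;	/* same effect as the removed empty stub */
	return ops->modify_port(port, mask);
}

int main(void)
{
	struct port_ops nes_like = { .modify_port = NULL };

	printf("modify_port -> %d\n", dispatch_modify_port(&nes_like, 1, 0));
	return 0;
}
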
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index 769a1d9da4b7..c9624ea87209 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1012,6 +1012,8 @@ struct qib_devdata {
1012 u8 psxmitwait_supported; 1012 u8 psxmitwait_supported;
1013 /* cycle length of PS* counters in HW (in picoseconds) */ 1013 /* cycle length of PS* counters in HW (in picoseconds) */
1014 u16 psxmitwait_check_rate; 1014 u16 psxmitwait_check_rate;
1015 /* high volume overflow errors deferred to tasklet */
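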
1016 struct tasklet_struct error_tasklet;
1015}; 1017};
1016 1018
1017/* hol_state values */ 1019/* hol_state values */
@@ -1433,6 +1435,7 @@ extern struct mutex qib_mutex;
1433struct qib_hwerror_msgs { 1435struct qib_hwerror_msgs {
1434 u64 mask; 1436 u64 mask;
1435 const char *msg; 1437 const char *msg;
1438 size_t sz;
1436}; 1439};
1437 1440
1438#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b } 1441#define QLOGIC_IB_HWE_MSG(a, b) { .mask = a, .msg = b }
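
The new error_tasklet field lets the qib interrupt handler hand high-rate overflow errors to softirq context instead of decoding them with interrupts disabled, and the new sz field caches each message's length. A minimal buildable module sketch of the tasklet half (the 2.6/3.x-era API, not the qib handler itself):

#include <linux/module.h>
#include <linux/interrupt.h>

static struct tasklet_struct error_tasklet;

/* runs in softirq context, so the slow decode work is off the IRQ path */
static void error_tasklet_fn(unsigned long data)
{
	pr_info("deferred error processing, data=%lu\n", data);
}

static int __init demo_init(void)
{
	tasklet_init(&error_tasklet, error_tasklet_fn, 0);
	tasklet_schedule(&error_tasklet);	/* an IRQ handler would do this */
	return 0;
}

static void __exit demo_exit(void)
{
	tasklet_kill(&error_tasklet);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
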
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 406fca50d036..26253039d2c7 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -1527,6 +1527,7 @@ done_chk_sdma:
1527 struct qib_filedata *fd = fp->private_data; 1527 struct qib_filedata *fd = fp->private_data;
1528 const struct qib_ctxtdata *rcd = fd->rcd; 1528 const struct qib_ctxtdata *rcd = fd->rcd;
1529 const struct qib_devdata *dd = rcd->dd; 1529 const struct qib_devdata *dd = rcd->dd;
1530 unsigned int weight;
1530 1531
1531 if (dd->flags & QIB_HAS_SEND_DMA) { 1532 if (dd->flags & QIB_HAS_SEND_DMA) {
1532 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev, 1533 fd->pq = qib_user_sdma_queue_create(&dd->pcidev->dev,
@@ -1545,8 +1546,8 @@ done_chk_sdma:
1545 * it just means that sooner or later we don't recommend 1546 * it just means that sooner or later we don't recommend
1546 * a cpu, and let the scheduler do its best. 1547 * a cpu, and let the scheduler do its best.
1547 */ 1548 */
1548 if (!ret && cpus_weight(current->cpus_allowed) >= 1549 weight = cpumask_weight(tsk_cpus_allowed(current));
1549 qib_cpulist_count) { 1550 if (!ret && weight >= qib_cpulist_count) {
1550 int cpu; 1551 int cpu;
1551 cpu = find_first_zero_bit(qib_cpulist, 1552 cpu = find_first_zero_bit(qib_cpulist,
1552 qib_cpulist_count); 1553 qib_cpulist_count);
@@ -1554,13 +1555,13 @@ done_chk_sdma:
1554 __set_bit(cpu, qib_cpulist); 1555 __set_bit(cpu, qib_cpulist);
1555 fd->rec_cpu_num = cpu; 1556 fd->rec_cpu_num = cpu;
1556 } 1557 }
1557 } else if (cpus_weight(current->cpus_allowed) == 1 && 1558 } else if (weight == 1 &&
1558 test_bit(first_cpu(current->cpus_allowed), 1559 test_bit(cpumask_first(tsk_cpus_allowed(current)),
1559 qib_cpulist)) 1560 qib_cpulist))
1560 qib_devinfo(dd->pcidev, "%s PID %u affinity " 1561 qib_devinfo(dd->pcidev, "%s PID %u affinity "
1561 "set to cpu %d; already allocated\n", 1562 "set to cpu %d; already allocated\n",
1562 current->comm, current->pid, 1563 current->comm, current->pid,
1563 first_cpu(current->cpus_allowed)); 1564 cpumask_first(tsk_cpus_allowed(current)));
1564 } 1565 }
1565 1566
1566 mutex_unlock(&qib_mutex); 1567 mutex_unlock(&qib_mutex);
@@ -1904,8 +1905,9 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1904 struct qib_ctxtdata *rcd; 1905 struct qib_ctxtdata *rcd;
1905 unsigned ctxt; 1906 unsigned ctxt;
1906 int ret = 0; 1907 int ret = 0;
1908 unsigned long flags;
1907 1909
1908 spin_lock(&ppd->dd->uctxt_lock); 1910 spin_lock_irqsave(&ppd->dd->uctxt_lock, flags);
1909 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts; 1911 for (ctxt = ppd->dd->first_user_ctxt; ctxt < ppd->dd->cfgctxts;
1910 ctxt++) { 1912 ctxt++) {
1911 rcd = ppd->dd->rcd[ctxt]; 1913 rcd = ppd->dd->rcd[ctxt];
@@ -1924,7 +1926,7 @@ int qib_set_uevent_bits(struct qib_pportdata *ppd, const int evtbit)
1924 ret = 1; 1926 ret = 1;
1925 break; 1927 break;
1926 } 1928 }
1927 spin_unlock(&ppd->dd->uctxt_lock); 1929 spin_unlock_irqrestore(&ppd->dd->uctxt_lock, flags);
1928 1930
1929 return ret; 1931 return ret;
1930} 1932}
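
The hunks above compute the task's CPU-affinity weight once via cpumask_weight(tsk_cpus_allowed(current)): a weight of 1 means the process is already pinned, so its CPU is simply recorded; and uctxt_lock switches to the irqsave variant, presumably because the lock can now also be taken from interrupt context. The weight/first-bit logic on a toy mask:

#include <stdio.h>

/* toy cpumask: a word whose set bits are the CPUs a task may use */
static int weight(unsigned long mask)
{
	return __builtin_popcountl(mask);
}

static int first(unsigned long mask)
{
	return __builtin_ctzl(mask);
}

int main(void)
{
	unsigned long allowed = 1UL << 3;	/* pinned to CPU 3 */

	if (weight(allowed) == 1)
		printf("task pinned to cpu %d, record it\n", first(allowed));
	else
		printf("task may migrate across %d cpus\n", weight(allowed));
	return 0;
}
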
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index c765a2eb04cf..e1f947446c2a 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -2434,6 +2434,7 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2434 int lsb, ret = 0, setforce = 0; 2434 int lsb, ret = 0, setforce = 0;
2435 u16 lcmd, licmd; 2435 u16 lcmd, licmd;
2436 unsigned long flags; 2436 unsigned long flags;
2437 u32 tmp = 0;
2437 2438
2438 switch (which) { 2439 switch (which) {
2439 case QIB_IB_CFG_LIDLMC: 2440 case QIB_IB_CFG_LIDLMC:
@@ -2467,9 +2468,6 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2467 maskr = IBA7220_IBC_WIDTH_MASK; 2468 maskr = IBA7220_IBC_WIDTH_MASK;
2468 lsb = IBA7220_IBC_WIDTH_SHIFT; 2469 lsb = IBA7220_IBC_WIDTH_SHIFT;
2469 setforce = 1; 2470 setforce = 1;
2470 spin_lock_irqsave(&ppd->lflags_lock, flags);
2471 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2472 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2473 break; 2471 break;
2474 2472
2475 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */ 2473 case QIB_IB_CFG_SPD_ENB: /* set allowed Link speeds */
@@ -2643,6 +2641,28 @@ static int qib_7220_set_ib_cfg(struct qib_pportdata *ppd, int which, u32 val)
2643 goto bail; 2641 goto bail;
2644 } 2642 }
2645 qib_set_ib_7220_lstate(ppd, lcmd, licmd); 2643 qib_set_ib_7220_lstate(ppd, lcmd, licmd);
2644
2645 maskr = IBA7220_IBC_WIDTH_MASK;
2646 lsb = IBA7220_IBC_WIDTH_SHIFT;
2647 tmp = (ppd->cpspec->ibcddrctrl >> lsb) & maskr;
2648 /* If the width active on the chip does not match the
2649 * width in the shadow register, write the new active
2650 * width to the chip.
2651 * We don't have to worry about speed as the speed is taken
2652 * care of by set_7220_ibspeed_fast called by ib_updown.
2653 */
2654 if (ppd->link_width_enabled-1 != tmp) {
2655 ppd->cpspec->ibcddrctrl &= ~(maskr << lsb);
2656 ppd->cpspec->ibcddrctrl |=
2657 (((u64)(ppd->link_width_enabled-1) & maskr) <<
2658 lsb);
2659 qib_write_kreg(dd, kr_ibcddrctrl,
2660 ppd->cpspec->ibcddrctrl);
2661 qib_write_kreg(dd, kr_scratch, 0);
2662 spin_lock_irqsave(&ppd->lflags_lock, flags);
2663 ppd->lflags |= QIBL_IB_FORCE_NOTIFY;
2664 spin_unlock_irqrestore(&ppd->lflags_lock, flags);
2665 }
2646 goto bail; 2666 goto bail;
2647 2667
2648 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */ 2668 case QIB_IB_CFG_HRTBT: /* set Heartbeat off/enable/auto */
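
The block added above repairs the active link width by comparing the field currently held in the ibcddrctrl shadow register against link_width_enabled - 1 and rewriting it only on mismatch, then raising QIBL_IB_FORCE_NOTIFY. The read-modify-write step alone, with hypothetical mask and shift values:

#include <stdint.h>
#include <stdio.h>

#define WIDTH_MASK  0x7ULL	/* hypothetical field layout */
#define WIDTH_SHIFT 1

int main(void)
{
	uint64_t shadow = 0x0;
	uint64_t want = 2;	/* e.g. link_width_enabled - 1 */
	uint64_t cur = (shadow >> WIDTH_SHIFT) & WIDTH_MASK;

	if (cur != want) {
		shadow &= ~(WIDTH_MASK << WIDTH_SHIFT);	/* clear the field */
		shadow |= (want & WIDTH_MASK) << WIDTH_SHIFT;
		/* the driver would now write shadow to the chip and
		 * raise the force-notify flag under lflags_lock */
	}
	printf("shadow = %#llx\n", (unsigned long long)shadow);
	return 0;
}
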
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 8ec5237031a0..5ea9ece23b33 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -114,6 +114,10 @@ static ushort qib_singleport;
 module_param_named(singleport, qib_singleport, ushort, S_IRUGO);
 MODULE_PARM_DESC(singleport, "Use only IB port 1; more per-port buffer space");
 
+static ushort qib_krcvq01_no_msi;
+module_param_named(krcvq01_no_msi, qib_krcvq01_no_msi, ushort, S_IRUGO);
+MODULE_PARM_DESC(krcvq01_no_msi, "No MSI for kctx < 2");
+
 /*
  * Receive header queue sizes
  */
@@ -397,7 +401,6 @@ MODULE_PARM_DESC(txselect, \
 #define crp_txdroppedpkt CREG_IDX(TxDroppedPktCnt)
 #define crp_txhdrerr CREG_IDX(TxHeadersErrCnt)
 #define crp_txlenerr CREG_IDX(TxLenErrCnt)
-#define crp_txlenerr CREG_IDX(TxLenErrCnt)
 #define crp_txminmaxlenerr CREG_IDX(TxMaxMinLenErrCnt)
 #define crp_txsdmadesc CREG_IDX(TxSDmaDescCnt)
 #define crp_txunderrun CREG_IDX(TxUnderrunCnt)
@@ -1107,9 +1110,9 @@ static inline u32 read_7322_creg32_port(const struct qib_pportdata *ppd,
 #define AUTONEG_TRIES 3 /* sequential retries to negotiate DDR */
 
 #define HWE_AUTO(fldname) { .mask = SYM_MASK(HwErrMask, fldname##Mask), \
-	.msg = #fldname }
+	.msg = #fldname , .sz = sizeof(#fldname) }
 #define HWE_AUTO_P(fldname, port) { .mask = SYM_MASK(HwErrMask, \
-	fldname##Mask##_##port), .msg = #fldname }
+	fldname##Mask##_##port), .msg = #fldname , .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
 	HWE_AUTO_P(IBSerdesPClkNotDetect, 1),
 	HWE_AUTO_P(IBSerdesPClkNotDetect, 0),
@@ -1127,14 +1130,16 @@ static const struct qib_hwerror_msgs qib_7322_hwerror_msgs[] = {
 	HWE_AUTO_P(IBCBusFromSPCParityErr, 0),
 	HWE_AUTO(statusValidNoEop),
 	HWE_AUTO(LATriggered),
-	{ .mask = 0 }
+	{ .mask = 0, .sz = 0 }
 };
 
 #define E_AUTO(fldname) { .mask = SYM_MASK(ErrMask, fldname##Mask), \
-	.msg = #fldname }
+	.msg = #fldname, .sz = sizeof(#fldname) }
 #define E_P_AUTO(fldname) { .mask = SYM_MASK(ErrMask_0, fldname##Mask), \
-	.msg = #fldname }
+	.msg = #fldname, .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
+	E_AUTO(RcvEgrFullErr),
+	E_AUTO(RcvHdrFullErr),
 	E_AUTO(ResetNegated),
 	E_AUTO(HardwareErr),
 	E_AUTO(InvalidAddrErr),
@@ -1147,9 +1152,7 @@ static const struct qib_hwerror_msgs qib_7322error_msgs[] = {
 	E_AUTO(SendSpecialTriggerErr),
 	E_AUTO(SDmaWrongPortErr),
 	E_AUTO(SDmaBufMaskDuplicateErr),
-	E_AUTO(RcvHdrFullErr),
-	E_AUTO(RcvEgrFullErr),
-	{ .mask = 0 }
+	{ .mask = 0, .sz = 0 }
 };
 
 static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
@@ -1159,7 +1162,8 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
 	/*
 	 * SDmaHaltErr is not really an error, make it clearer;
 	 */
-	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted"},
+	{.mask = SYM_MASK(ErrMask_0, SDmaHaltErrMask), .msg = "SDmaHalted",
+		.sz = 11},
 	E_P_AUTO(SDmaDescAddrMisalignErr),
 	E_P_AUTO(SDmaUnexpDataErr),
 	E_P_AUTO(SDmaMissingDwErr),
@@ -1195,7 +1199,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
 	E_P_AUTO(RcvICRCErr),
 	E_P_AUTO(RcvVCRCErr),
 	E_P_AUTO(RcvFormatErr),
-	{ .mask = 0 }
+	{ .mask = 0, .sz = 0 }
 };
 
 /*
@@ -1203,17 +1207,17 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
  * context
  */
 #define INTR_AUTO(fldname) { .mask = SYM_MASK(IntMask, fldname##Mask), \
-	.msg = #fldname }
+	.msg = #fldname, .sz = sizeof(#fldname) }
 /* Below generates "auto-message" for interrupts specific to a port */
 #define INTR_AUTO_P(fldname) { .mask = MASK_ACROSS(\
 	SYM_LSB(IntMask, fldname##Mask##_0), \
 	SYM_LSB(IntMask, fldname##Mask##_1)), \
-	.msg = #fldname "_P" }
+	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
 /* For some reason, the SerDesTrimDone bits are reversed */
 #define INTR_AUTO_PI(fldname) { .mask = MASK_ACROSS(\
 	SYM_LSB(IntMask, fldname##Mask##_1), \
 	SYM_LSB(IntMask, fldname##Mask##_0)), \
-	.msg = #fldname "_P" }
+	.msg = #fldname "_P", .sz = sizeof(#fldname "_P") }
 /*
  * Below generates "auto-message" for interrupts specific to a context,
  * with ctxt-number appended
@@ -1221,7 +1225,7 @@ static const struct qib_hwerror_msgs qib_7322p_error_msgs[] = {
 #define INTR_AUTO_C(fldname) { .mask = MASK_ACROSS(\
 	SYM_LSB(IntMask, fldname##0IntMask), \
 	SYM_LSB(IntMask, fldname##17IntMask)), \
-	.msg = #fldname "_C"}
+	.msg = #fldname "_C", .sz = sizeof(#fldname "_C") }
 
 static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
 	INTR_AUTO_P(SDmaInt),
@@ -1235,11 +1239,12 @@ static const struct qib_hwerror_msgs qib_7322_intr_msgs[] = {
 	INTR_AUTO_P(SendDoneInt),
 	INTR_AUTO(SendBufAvailInt),
 	INTR_AUTO_C(RcvAvail),
-	{ .mask = 0 }
+	{ .mask = 0, .sz = 0 }
 };
 
 #define TXSYMPTOM_AUTO_P(fldname) \
-	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), .msg = #fldname }
+	{ .mask = SYM_MASK(SendHdrErrSymptom_0, fldname), \
+	.msg = #fldname, .sz = sizeof(#fldname) }
 static const struct qib_hwerror_msgs hdrchk_msgs[] = {
 	TXSYMPTOM_AUTO_P(NonKeyPacket),
 	TXSYMPTOM_AUTO_P(GRHFail),
@@ -1248,7 +1253,7 @@ static const struct qib_hwerror_msgs hdrchk_msgs[] = {
 	TXSYMPTOM_AUTO_P(SLIDFail),
 	TXSYMPTOM_AUTO_P(RawIPV6),
 	TXSYMPTOM_AUTO_P(PacketTooSmall),
-	{ .mask = 0 }
+	{ .mask = 0, .sz = 0 }
 };
 
 #define IBA7322_HDRHEAD_PKTINT_SHIFT 32 /* interrupt cnt in upper 32 bits */
@@ -1293,7 +1298,7 @@ static void err_decode(char *msg, size_t len, u64 errs,
 	u64 these, lmask;
 	int took, multi, n = 0;
 
-	while (msp && msp->mask) {
+	while (errs && msp && msp->mask) {
 		multi = (msp->mask & (msp->mask - 1));
 		while (errs & msp->mask) {
 			these = (errs & msp->mask);
@@ -1304,9 +1309,14 @@ static void err_decode(char *msg, size_t len, u64 errs,
 				*msg++ = ',';
 				len--;
 			}
-			took = scnprintf(msg, len, "%s", msp->msg);
+			BUG_ON(!msp->sz);
+			/* msp->sz counts the nul */
+			took = min_t(size_t, msp->sz - (size_t)1, len);
+			memcpy(msg, msp->msg, took);
 			len -= took;
 			msg += took;
+			if (len)
+				*msg = '\0';
 		}
 		errs &= ~lmask;
 		if (len && multi) {
@@ -1644,6 +1654,14 @@ done:
 	return;
 }
 
+static void qib_error_tasklet(unsigned long data)
+{
+	struct qib_devdata *dd = (struct qib_devdata *)data;
+
+	handle_7322_errors(dd);
+	qib_write_kreg(dd, kr_errmask, dd->cspec->errormask);
+}
+
 static void reenable_chase(unsigned long opaque)
 {
 	struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
@@ -2725,8 +2743,10 @@ static noinline void unlikely_7322_intr(struct qib_devdata *dd, u64 istat)
 		unknown_7322_ibits(dd, istat);
 	if (istat & QIB_I_GPIO)
 		unknown_7322_gpio_intr(dd);
-	if (istat & QIB_I_C_ERROR)
-		handle_7322_errors(dd);
+	if (istat & QIB_I_C_ERROR) {
+		qib_write_kreg(dd, kr_errmask, 0ULL);
+		tasklet_schedule(&dd->error_tasklet);
+	}
 	if (istat & INT_MASK_P(Err, 0) && dd->rcd[0])
 		handle_7322_p_errors(dd->rcd[0]->ppd);
 	if (istat & INT_MASK_P(Err, 1) && dd->rcd[1])
@@ -3125,6 +3145,8 @@ try_intx:
 			arg = dd->rcd[ctxt];
 			if (!arg)
 				continue;
+			if (qib_krcvq01_no_msi && ctxt < 2)
+				continue;
 			lsb = QIB_I_RCVAVAIL_LSB + ctxt;
 			handler = qib_7322pintr;
 			name = QIB_DRV_NAME " (kctx)";
@@ -3159,6 +3181,8 @@ try_intx:
 	for (i = 0; i < ARRAY_SIZE(redirect); i++)
 		qib_write_kreg(dd, kr_intredirect + i, redirect[i]);
 	dd->cspec->main_int_mask = mask;
+	tasklet_init(&dd->error_tasklet, qib_error_tasklet,
+		(unsigned long)dd);
 bail:;
 }
 
@@ -6788,6 +6812,10 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
 		    (i >= ARRAY_SIZE(irq_table) &&
 		     dd->rcd[i - ARRAY_SIZE(irq_table)]))
 			actual_cnt++;
+	/* reduce by ctxt's < 2 */
+	if (qib_krcvq01_no_msi)
+		actual_cnt -= dd->num_pports;
+
 	tabsize = actual_cnt;
 	dd->cspec->msix_entries = kmalloc(tabsize *
 		sizeof(struct msix_entry), GFP_KERNEL);
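[Note] Two of the 7322 hunks rework err_decode(): each message's length is now carried in the table's .sz member (sizeof of the string literal, which counts the trailing nul), so the copy becomes a bounded memcpy() with explicit nul termination instead of one scnprintf() per matched bit, and the outer loop exits early once errs is empty. A self-contained userspace sketch of the same decode loop, simplified (no BUG_ON/min_t, invented error names):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	struct errmsg {
		uint64_t mask;
		const char *msg;
		size_t sz;		/* sizeof(string literal): counts the nul */
	};

	#define E(bit, name) { .mask = 1ULL << (bit), .msg = name, .sz = sizeof(name) }

	static const struct errmsg msgs[] = {
		E(0, "RcvEgrFull"),
		E(1, "RcvHdrFull"),
		E(5, "HardwareErr"),
		{ .mask = 0, .sz = 0 }
	};

	/* Decode set bits into a comma-separated list without overrunning len. */
	static void err_decode(char *buf, size_t len, uint64_t errs,
			       const struct errmsg *msp)
	{
		int n = 0;

		if (!len)
			return;
		*buf = '\0';
		while (errs && msp->mask) {	/* stop once all bits are consumed */
			if (errs & msp->mask) {
				size_t took = msp->sz - 1;	/* drop the nul */

				if (n++ && len > 1) {		/* comma separator */
					*buf++ = ',';
					len--;
				}
				if (took > len - 1)		/* bounded copy */
					took = len - 1;
				memcpy(buf, msp->msg, took);
				buf += took;
				len -= took;
				*buf = '\0';
				errs &= ~msp->mask;
			}
			msp++;
		}
	}

	int main(void)
	{
		char buf[32];

		err_decode(buf, sizeof(buf), (1ULL << 0) | (1ULL << 5), msgs);
		printf("%s\n", buf);	/* prints: RcvEgrFull,HardwareErr */
		return 0;
	}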
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 8fd3df5bf04d..3b3745f261f0 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -1125,22 +1125,22 @@ static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev,
 	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
 }
 
-static int pma_get_classportinfo(struct ib_perf *pmp,
+static int pma_get_classportinfo(struct ib_pma_mad *pmp,
 				 struct ib_device *ibdev)
 {
-	struct ib_pma_classportinfo *p =
-		(struct ib_pma_classportinfo *)pmp->data;
+	struct ib_class_port_info *p =
+		(struct ib_class_port_info *)pmp->data;
 	struct qib_devdata *dd = dd_from_ibdev(ibdev);
 
 	memset(pmp->data, 0, sizeof(pmp->data));
 
-	if (pmp->attr_mod != 0)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	/* Note that AllPortSelect is not valid */
 	p->base_version = 1;
 	p->class_version = 1;
-	p->cap_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+	p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
 	/*
 	 * Set the most significant bit of CM2 to indicate support for
 	 * congestion statistics
@@ -1154,7 +1154,7 @@ static int pma_get_classportinfo(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portsamplescontrol(struct ib_perf *pmp,
+static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp,
 				      struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -1169,8 +1169,8 @@ static int pma_get_portsamplescontrol(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
 	spin_lock_irqsave(&ibp->lock, flags);
@@ -1192,7 +1192,7 @@ bail:
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_set_portsamplescontrol(struct ib_perf *pmp,
+static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp,
 				      struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplescontrol *p =
@@ -1205,8 +1205,8 @@ static int pma_set_portsamplescontrol(struct ib_perf *pmp,
 	u8 status, xmit_flags;
 	int ret;
 
-	if (pmp->attr_mod != 0 || p->port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
@@ -1321,7 +1321,7 @@ static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd,
 	return ret;
 }
 
-static int pma_get_portsamplesresult(struct ib_perf *pmp,
+static int pma_get_portsamplesresult(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplesresult *p =
@@ -1360,7 +1360,7 @@ static int pma_get_portsamplesresult(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
+static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp,
 					 struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portsamplesresult_ext *p =
@@ -1402,7 +1402,7 @@ static int pma_get_portsamplesresult_ext(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portcounters(struct ib_perf *pmp,
+static int pma_get_portcounters(struct ib_pma_mad *pmp,
 				struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1436,8 +1436,8 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port)
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	if (cntrs.symbol_error_counter > 0xFFFFUL)
 		p->symbol_error_counter = cpu_to_be16(0xFFFF);
@@ -1472,7 +1472,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1500,7 +1500,7 @@ static int pma_get_portcounters(struct ib_perf *pmp,
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_get_portcounters_cong(struct ib_perf *pmp,
+static int pma_get_portcounters_cong(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	/* Congestion PMA packets start at offset 24 not 64 */
@@ -1510,7 +1510,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_devdata *dd = dd_from_ppd(ppd);
-	u32 port_select = be32_to_cpu(pmp->attr_mod) & 0xFF;
+	u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF;
 	u64 xmit_wait_counter;
 	unsigned long flags;
 
@@ -1519,9 +1519,9 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	 * SET method ends up calling this anyway.
 	 */
 	if (!dd->psxmitwait_supported)
-		pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 	if (port_select != port)
-		pmp->status |= IB_SMP_INVALID_FIELD;
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 
 	qib_get_counters(ppd, &cntrs);
 	spin_lock_irqsave(&ppd->ibport_data.lock, flags);
@@ -1603,7 +1603,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 		cntrs.local_link_integrity_errors = 0xFUL;
 	if (cntrs.excessive_buffer_overrun_errors > 0xFUL)
 		cntrs.excessive_buffer_overrun_errors = 0xFUL;
-	p->lli_ebor_errors = (cntrs.local_link_integrity_errors << 4) |
+	p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) |
 		cntrs.excessive_buffer_overrun_errors;
 	if (cntrs.vl15_dropped > 0xFFFFUL)
 		p->vl15_dropped = cpu_to_be16(0xFFFF);
@@ -1613,7 +1613,7 @@ static int pma_get_portcounters_cong(struct ib_perf *pmp,
 	return reply((struct ib_smp *)pmp);
 }
 
-static int pma_get_portcounters_ext(struct ib_perf *pmp,
+static int pma_get_portcounters_ext(struct ib_pma_mad *pmp,
 				    struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters_ext *p =
@@ -1626,8 +1626,8 @@ static int pma_get_portcounters_ext(struct ib_perf *pmp,
 	memset(pmp->data, 0, sizeof(pmp->data));
 
 	p->port_select = port_select;
-	if (pmp->attr_mod != 0 || port_select != port) {
-		pmp->status |= IB_SMP_INVALID_FIELD;
+	if (pmp->mad_hdr.attr_mod != 0 || port_select != port) {
+		pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD;
 		goto bail;
 	}
 
@@ -1652,7 +1652,7 @@ bail:
 	return reply((struct ib_smp *) pmp);
 }
 
-static int pma_set_portcounters(struct ib_perf *pmp,
+static int pma_set_portcounters(struct ib_pma_mad *pmp,
 				struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1715,14 +1715,14 @@ static int pma_set_portcounters(struct ib_perf *pmp,
 	return pma_get_portcounters(pmp, ibdev, port);
 }
 
-static int pma_set_portcounters_cong(struct ib_perf *pmp,
+static int pma_set_portcounters_cong(struct ib_pma_mad *pmp,
 				     struct ib_device *ibdev, u8 port)
 {
 	struct qib_ibport *ibp = to_iport(ibdev, port);
 	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 	struct qib_devdata *dd = dd_from_ppd(ppd);
 	struct qib_verbs_counters cntrs;
-	u32 counter_select = (be32_to_cpu(pmp->attr_mod) >> 24) & 0xFF;
+	u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF;
 	int ret = 0;
 	unsigned long flags;
 
@@ -1766,7 +1766,7 @@ static int pma_set_portcounters_cong(struct ib_perf *pmp,
 	return ret;
 }
 
-static int pma_set_portcounters_ext(struct ib_perf *pmp,
+static int pma_set_portcounters_ext(struct ib_pma_mad *pmp,
 				    struct ib_device *ibdev, u8 port)
 {
 	struct ib_pma_portcounters *p = (struct ib_pma_portcounters *)
@@ -1959,19 +1959,19 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			struct ib_mad *in_mad,
 			struct ib_mad *out_mad)
 {
-	struct ib_perf *pmp = (struct ib_perf *)out_mad;
+	struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad;
 	int ret;
 
 	*out_mad = *in_mad;
-	if (pmp->class_version != 1) {
-		pmp->status |= IB_SMP_UNSUP_VERSION;
+	if (pmp->mad_hdr.class_version != 1) {
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION;
 		ret = reply((struct ib_smp *) pmp);
 		goto bail;
 	}
 
-	switch (pmp->method) {
+	switch (pmp->mad_hdr.method) {
 	case IB_MGMT_METHOD_GET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_CLASS_PORT_INFO:
 			ret = pma_get_classportinfo(pmp, ibdev);
 			goto bail;
@@ -1994,13 +1994,13 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			ret = pma_get_portcounters_cong(pmp, ibdev, port);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
 
 	case IB_MGMT_METHOD_SET:
-		switch (pmp->attr_id) {
+		switch (pmp->mad_hdr.attr_id) {
 		case IB_PMA_PORT_SAMPLES_CONTROL:
 			ret = pma_set_portsamplescontrol(pmp, ibdev, port);
 			goto bail;
@@ -2014,7 +2014,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 			ret = pma_set_portcounters_cong(pmp, ibdev, port);
 			goto bail;
 		default:
-			pmp->status |= IB_SMP_UNSUP_METH_ATTR;
+			pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR;
 			ret = reply((struct ib_smp *) pmp);
 			goto bail;
 		}
@@ -2030,7 +2030,7 @@ static int process_perf(struct ib_device *ibdev, u8 port,
 		goto bail;
 
 	default:
-		pmp->status |= IB_SMP_UNSUP_METHOD;
+		pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD;
 		ret = reply((struct ib_smp *) pmp);
 	}
 
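[Note] The qib_mad.c changes above drop the driver-private struct ib_perf and go through the common MAD header embedded in struct ib_pma_mad from <rdma/ib_pma.h>, so every header field is reached as pmp->mad_hdr.<field>. A rough userspace sketch of that layout split; the field subset is illustrative only, the real definitions live in <rdma/ib_mad.h> and <rdma/ib_pma.h>:

	#include <stdint.h>
	#include <stdio.h>

	/* Illustrative subset of the common MAD header, not the kernel layout. */
	struct mad_hdr {
		uint8_t  base_version;
		uint8_t  mgmt_class;
		uint8_t  class_version;
		uint8_t  method;
		uint16_t status;
		uint16_t attr_id;
		uint32_t attr_mod;
	};

	/* PMA MAD: shared header first, class-specific payload after it. */
	struct pma_mad {
		struct mad_hdr mad_hdr;
		uint8_t data[192];
	};

	int main(void)
	{
		struct pma_mad mad = { .mad_hdr = { .class_version = 2 } };

		/* Before the refactor the driver read a private status field
		 * directly; now every header access goes through mad_hdr. */
		if (mad.mad_hdr.class_version != 1)
			mad.mad_hdr.status |= 0x0004;	/* unsupported version */
		printf("status=0x%04x\n", mad.mad_hdr.status);
		return 0;
	}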
diff --git a/drivers/infiniband/hw/qib/qib_mad.h b/drivers/infiniband/hw/qib/qib_mad.h
index 7840ab593bcf..ecc416cdbaaa 100644
--- a/drivers/infiniband/hw/qib/qib_mad.h
+++ b/drivers/infiniband/hw/qib/qib_mad.h
@@ -32,6 +32,8 @@
  * SOFTWARE.
  */
 
+#include <rdma/ib_pma.h>
+
 #define IB_SMP_UNSUP_VERSION cpu_to_be16(0x0004)
 #define IB_SMP_UNSUP_METHOD cpu_to_be16(0x0008)
 #define IB_SMP_UNSUP_METH_ATTR cpu_to_be16(0x000C)
@@ -180,109 +182,8 @@ struct ib_vl_weight_elem {
 #define IB_VLARB_HIGHPRI_0_31 3
 #define IB_VLARB_HIGHPRI_32_63 4
 
-/*
- * PMA class portinfo capability mask bits
- */
-#define IB_PMA_CLASS_CAP_ALLPORTSELECT cpu_to_be16(1 << 8)
-#define IB_PMA_CLASS_CAP_EXT_WIDTH cpu_to_be16(1 << 9)
-#define IB_PMA_CLASS_CAP_XMIT_WAIT cpu_to_be16(1 << 12)
-
-#define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001)
-#define IB_PMA_PORT_SAMPLES_CONTROL cpu_to_be16(0x0010)
-#define IB_PMA_PORT_SAMPLES_RESULT cpu_to_be16(0x0011)
-#define IB_PMA_PORT_COUNTERS cpu_to_be16(0x0012)
-#define IB_PMA_PORT_COUNTERS_EXT cpu_to_be16(0x001D)
-#define IB_PMA_PORT_SAMPLES_RESULT_EXT cpu_to_be16(0x001E)
 #define IB_PMA_PORT_COUNTERS_CONG cpu_to_be16(0xFF00)
 
-struct ib_perf {
-	u8 base_version;
-	u8 mgmt_class;
-	u8 class_version;
-	u8 method;
-	__be16 status;
-	__be16 unused;
-	__be64 tid;
-	__be16 attr_id;
-	__be16 resv;
-	__be32 attr_mod;
-	u8 reserved[40];
-	u8 data[192];
-} __attribute__ ((packed));
-
-struct ib_pma_classportinfo {
-	u8 base_version;
-	u8 class_version;
-	__be16 cap_mask;
-	u8 reserved[3];
-	u8 resp_time_value;	/* only lower 5 bits */
-	union ib_gid redirect_gid;
-	__be32 redirect_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 redirect_lid;
-	__be16 redirect_pkey;
-	__be32 redirect_qp;	/* only lower 24 bits */
-	__be32 redirect_qkey;
-	union ib_gid trap_gid;
-	__be32 trap_tc_sl_fl;	/* 8, 4, 20 bits respectively */
-	__be16 trap_lid;
-	__be16 trap_pkey;
-	__be32 trap_hl_qp;	/* 8, 24 bits respectively */
-	__be32 trap_qkey;
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplescontrol {
-	u8 opcode;
-	u8 port_select;
-	u8 tick;
-	u8 counter_width;	/* only lower 3 bits */
-	__be32 counter_mask0_9;	/* 2, 10 * 3, bits */
-	__be16 counter_mask10_14;	/* 1, 5 * 3, bits */
-	u8 sample_mechanisms;
-	u8 sample_status;	/* only lower 2 bits */
-	__be64 option_mask;
-	__be64 vendor_mask;
-	__be32 sample_start;
-	__be32 sample_interval;
-	__be16 tag;
-	__be16 counter_select[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portsamplesresult_ext {
-	__be16 tag;
-	__be16 sample_status;	/* only lower 2 bits */
-	__be32 extended_width;	/* only upper 2 bits */
-	__be64 counter[15];
-} __attribute__ ((packed));
-
-struct ib_pma_portcounters {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be16 symbol_error_counter;
-	u8 link_error_recovery_counter;
-	u8 link_downed_counter;
-	__be16 port_rcv_errors;
-	__be16 port_rcv_remphys_errors;
-	__be16 port_rcv_switch_relay_errors;
-	__be16 port_xmit_discards;
-	u8 port_xmit_constraint_errors;
-	u8 port_rcv_constraint_errors;
-	u8 reserved1;
-	u8 lli_ebor_errors;	/* 4, 4, bits */
-	__be16 reserved2;
-	__be16 vl15_dropped;
-	__be32 port_xmit_data;
-	__be32 port_rcv_data;
-	__be32 port_xmit_packets;
-	__be32 port_rcv_packets;
-} __attribute__ ((packed));
-
 struct ib_pma_portcounters_cong {
 	u8 reserved;
 	u8 reserved1;
@@ -297,7 +198,7 @@ struct ib_pma_portcounters_cong {
 	u8 port_xmit_constraint_errors;
 	u8 port_rcv_constraint_errors;
 	u8 reserved2;
-	u8 lli_ebor_errors;	/* 4, 4, bits */
+	u8 link_overrun_errors; /* LocalLink: 7:4, BufferOverrun: 3:0 */
 	__be16 reserved3;
 	__be16 vl15_dropped;
 	__be64 port_xmit_data;
@@ -316,49 +217,11 @@ struct ib_pma_portcounters_cong {
 /* number of 4nsec cycles equaling 2secs */
 #define QIB_CONG_TIMER_PSINTERVAL 0x1DCD64EC
 
-#define IB_PMA_SEL_SYMBOL_ERROR cpu_to_be16(0x0001)
-#define IB_PMA_SEL_LINK_ERROR_RECOVERY cpu_to_be16(0x0002)
-#define IB_PMA_SEL_LINK_DOWNED cpu_to_be16(0x0004)
-#define IB_PMA_SEL_PORT_RCV_ERRORS cpu_to_be16(0x0008)
-#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS cpu_to_be16(0x0010)
-#define IB_PMA_SEL_PORT_XMIT_DISCARDS cpu_to_be16(0x0040)
-#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS cpu_to_be16(0x0200)
-#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS cpu_to_be16(0x0400)
-#define IB_PMA_SEL_PORT_VL15_DROPPED cpu_to_be16(0x0800)
-#define IB_PMA_SEL_PORT_XMIT_DATA cpu_to_be16(0x1000)
-#define IB_PMA_SEL_PORT_RCV_DATA cpu_to_be16(0x2000)
-#define IB_PMA_SEL_PORT_XMIT_PACKETS cpu_to_be16(0x4000)
-#define IB_PMA_SEL_PORT_RCV_PACKETS cpu_to_be16(0x8000)
-
 #define IB_PMA_SEL_CONG_ALL 0x01
 #define IB_PMA_SEL_CONG_PORT_DATA 0x02
 #define IB_PMA_SEL_CONG_XMIT 0x04
 #define IB_PMA_SEL_CONG_ROUTING 0x08
 
-struct ib_pma_portcounters_ext {
-	u8 reserved;
-	u8 port_select;
-	__be16 counter_select;
-	__be32 reserved1;
-	__be64 port_xmit_data;
-	__be64 port_rcv_data;
-	__be64 port_xmit_packets;
-	__be64 port_rcv_packets;
-	__be64 port_unicast_xmit_packets;
-	__be64 port_unicast_rcv_packets;
-	__be64 port_multicast_xmit_packets;
-	__be64 port_multicast_rcv_packets;
-} __attribute__ ((packed));
-
-#define IB_PMA_SELX_PORT_XMIT_DATA cpu_to_be16(0x0001)
-#define IB_PMA_SELX_PORT_RCV_DATA cpu_to_be16(0x0002)
-#define IB_PMA_SELX_PORT_XMIT_PACKETS cpu_to_be16(0x0004)
-#define IB_PMA_SELX_PORT_RCV_PACKETS cpu_to_be16(0x0008)
-#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS cpu_to_be16(0x0010)
-#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS cpu_to_be16(0x0020)
-#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS cpu_to_be16(0x0040)
-#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS cpu_to_be16(0x0080)
-
 /*
  * The PortSamplesControl.CounterMasks field is an array of 3 bit fields
  * which specify the N'th counter's capabilities. See ch. 16.1.3.2.
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 891cc2ff5f00..4426782ad288 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -255,7 +255,7 @@ int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
 	u16 linkstat, speed;
 	int pos = 0, pose, ret = 1;
 
-	pose = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	pose = pci_pcie_cap(dd->pcidev);
 	if (!pose) {
 		qib_dev_err(dd, "Can't find PCI Express capability!\n");
 		/* set up something... */
@@ -509,7 +509,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
 		qib_devinfo(dd->pcidev, "Parent not root\n");
 		return 1;
 	}
-	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	ppos = pci_pcie_cap(parent);
 	if (!ppos)
 		return 1;
 	if (parent->vendor != 0x8086)
@@ -578,14 +578,14 @@ static int qib_tune_pcie_caps(struct qib_devdata *dd)
 		qib_devinfo(dd->pcidev, "Parent not root\n");
 		goto bail;
 	}
-	ppos = pci_find_capability(parent, PCI_CAP_ID_EXP);
+	ppos = pci_pcie_cap(parent);
 	if (ppos) {
 		pci_read_config_word(parent, ppos + PCI_EXP_DEVCAP, &pcaps);
 		pci_read_config_word(parent, ppos + PCI_EXP_DEVCTL, &pctl);
 	} else
 		goto bail;
 	/* Find out supported and configured values for endpoint (us) */
-	epos = pci_find_capability(dd->pcidev, PCI_CAP_ID_EXP);
+	epos = pci_pcie_cap(dd->pcidev);
 	if (epos) {
 		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCAP, &ecaps);
 		pci_read_config_word(dd->pcidev, epos + PCI_EXP_DEVCTL, &ectl);
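[Note] The qib_pcie.c hunks swap pci_find_capability(dev, PCI_CAP_ID_EXP), which walks the capability list in config space on every call, for pci_pcie_cap(dev), which returns the offset the PCI core already cached when the device was enumerated. A small plain-C sketch of that caching idea, with an invented slow_lookup() standing in for the config-space walk:

	#include <stdio.h>

	struct dev {
		int cached_cap;		/* filled once at "probe" time, 0 = absent */
	};

	/* Stands in for walking the capability linked list in config space. */
	static int slow_lookup(void)
	{
		puts("walking config space...");
		return 0x60;		/* pretend the capability lives at 0x60 */
	}

	static void probe(struct dev *d)
	{
		d->cached_cap = slow_lookup();	/* pay the cost exactly once */
	}

	/* Analogous to pci_pcie_cap(): no config-space access, just the cache. */
	static int pcie_cap(const struct dev *d)
	{
		return d->cached_cap;
	}

	int main(void)
	{
		struct dev d;

		probe(&d);
		printf("cap offset: 0x%x\n", pcie_cap(&d));	/* cheap */
		printf("cap offset: 0x%x\n", pcie_cap(&d));	/* still cheap */
		return 0;
	}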
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index d50a33fe8bbc..14d129de4320 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -507,6 +507,18 @@ static ssize_t show_nctxts(struct device *device,
 			dd->first_user_ctxt);
 }
 
+static ssize_t show_nfreectxts(struct device *device,
+			       struct device_attribute *attr, char *buf)
+{
+	struct qib_ibdev *dev =
+		container_of(device, struct qib_ibdev, ibdev.dev);
+	struct qib_devdata *dd = dd_from_dev(dev);
+
+	/* Return the number of free user ports (contexts) available. */
+	return scnprintf(buf, PAGE_SIZE, "%u\n", dd->cfgctxts -
+		dd->first_user_ctxt - (u32)qib_stats.sps_ctxts);
+}
+
 static ssize_t show_serial(struct device *device,
 			   struct device_attribute *attr, char *buf)
 {
@@ -604,6 +616,7 @@ static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
 static DEVICE_ATTR(board_id, S_IRUGO, show_hca, NULL);
 static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
 static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
+static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
 static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
@@ -617,6 +630,7 @@ static struct device_attribute *qib_attributes[] = {
 	&dev_attr_board_id,
 	&dev_attr_version,
 	&dev_attr_nctxts,
+	&dev_attr_nfreectxts,
 	&dev_attr_serial,
 	&dev_attr_boardversion,
 	&dev_attr_logged_errors,
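[Note] The new read-only nfreectxts attribute reports configured contexts minus those reserved for the kernel and those already handed out to user processes (qib_stats.sps_ctxts counts contexts in use); userspace would read it from something like /sys/class/infiniband/qib0/nfreectxts (device name assumed). A trivial standalone model of that arithmetic, with made-up numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned cfgctxts = 18;		/* contexts configured on the HCA */
		unsigned first_user_ctxt = 2;	/* contexts 0..1 belong to the kernel */
		unsigned in_use = 5;		/* user contexts already opened */

		/* Mirrors show_nfreectxts(): cfgctxts - first_user_ctxt - in_use. */
		printf("%u\n", cfgctxts - first_user_ctxt - in_use);	/* 11 */
		return 0;
	}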
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index ee165fdcb596..7d5109bbd1ad 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2127,6 +2127,8 @@ static ssize_t srp_create_target(struct device *dev,
 		return -ENOMEM;
 
 	target_host->transportt = ib_srp_transport_template;
+	target_host->max_channel = 0;
+	target_host->max_id = 1;
 	target_host->max_lun = SRP_MAX_LUN;
 	target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb;
 
diff --git a/drivers/net/mlx4/en_ethtool.c b/drivers/net/mlx4/en_ethtool.c
index 2e858e4dcf4d..eb096253d781 100644
--- a/drivers/net/mlx4/en_ethtool.c
+++ b/drivers/net/mlx4/en_ethtool.c
@@ -104,7 +104,7 @@ static void mlx4_en_get_wol(struct net_device *netdev,
 	int err = 0;
 	u64 config = 0;
 
-	if (!priv->mdev->dev->caps.wol) {
+	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
 		wol->supported = 0;
 		wol->wolopts = 0;
 		return;
@@ -134,7 +134,7 @@ static int mlx4_en_set_wol(struct net_device *netdev,
 	u64 config = 0;
 	int err = 0;
 
-	if (!priv->mdev->dev->caps.wol)
+	if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
 		return -EOPNOTSUPP;
 
 	if (wol->supported & ~WAKE_MAGIC)
@@ -170,7 +170,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 		return NUM_ALL_STATS +
 			(priv->tx_ring_num + priv->rx_ring_num) * 2;
 	case ETH_SS_TEST:
-		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.loopback_support) * 2;
+		return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
+					& MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -220,7 +221,7 @@ static void mlx4_en_get_strings(struct net_device *dev,
 	case ETH_SS_TEST:
 		for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++)
 			strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
-		if (priv->mdev->dev->caps.loopback_support)
+		if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK)
 			for (; i < MLX4_EN_NUM_SELF_TEST; i++)
 				strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]);
 		break;
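[Note] The get_sset_count() hunk keeps the earlier arithmetic: when unicast loopback is absent, two self-tests are dropped from the count, relying on ! producing 0 or 1 which is then scaled by 2. A tiny standalone check of that idiom; the self-test total of 5 is an assumed value, not necessarily MLX4_EN_NUM_SELF_TEST:

	#include <stdio.h>

	#define NUM_SELF_TEST 5	/* assumed value for illustration */

	static int sset_count(unsigned long long caps, unsigned long long loopback_bit)
	{
		/* !(caps & bit) is 1 when the capability is missing, so the two
		 * loopback-dependent tests are subtracted from the total. */
		return NUM_SELF_TEST - !(caps & loopback_bit) * 2;
	}

	int main(void)
	{
		printf("%d\n", sset_count(1ULL << 32, 1ULL << 32));	/* 5 */
		printf("%d\n", sset_count(0, 1ULL << 32));		/* 3 */
		return 0;
	}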
diff --git a/drivers/net/mlx4/en_main.c b/drivers/net/mlx4/en_main.c
index 9276b1b25586..6bfea233a9f2 100644
--- a/drivers/net/mlx4/en_main.c
+++ b/drivers/net/mlx4/en_main.c
@@ -106,7 +106,8 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
 
 	params->tcp_rss = tcp_rss;
 	params->udp_rss = udp_rss;
-	if (params->udp_rss && !mdev->dev->caps.udp_rss) {
+	if (params->udp_rss && !(mdev->dev->caps.flags
+					& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
 		mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
 		params->udp_rss = 0;
 	}
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 9d3f57e76f2f..4b0f32e568f8 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -215,7 +215,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags |= MLX4_EN_FLAG_PROMISC;
 
 		/* Enable promiscouos mode */
-		if (!mdev->dev->caps.vep_uc_steering)
+		if (!(mdev->dev->caps.flags &
+					MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 1);
 		else
@@ -259,7 +260,7 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		priv->flags &= ~MLX4_EN_FLAG_PROMISC;
 
 		/* Disable promiscouos mode */
-		if (!mdev->dev->caps.vep_uc_steering)
+		if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
 			err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port,
 						     priv->base_qpn, 0);
 		else
diff --git a/drivers/net/mlx4/en_port.c b/drivers/net/mlx4/en_port.c
index 2a74bc81b9f7..5e7109178061 100644
--- a/drivers/net/mlx4/en_port.c
+++ b/drivers/net/mlx4/en_port.c
@@ -114,9 +114,11 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
 	struct mlx4_set_port_rqp_calc_context *context;
 	int err;
 	u32 in_mod;
-	u32 m_promisc = (dev->caps.vep_mc_steering) ? MCAST_DIRECT : MCAST_DEFAULT;
+	u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+						MCAST_DIRECT : MCAST_DEFAULT;
 
-	if (dev->caps.vep_mc_steering && dev->caps.vep_uc_steering)
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+			dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
 		return 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
diff --git a/drivers/net/mlx4/en_selftest.c b/drivers/net/mlx4/en_selftest.c
index 191a8dcd8a93..9fdbcecd499d 100644
--- a/drivers/net/mlx4/en_selftest.c
+++ b/drivers/net/mlx4/en_selftest.c
@@ -159,7 +159,8 @@ retry_tx:
 			goto retry_tx;
 	}
 
-	if (priv->mdev->dev->caps.loopback_support){
+	if (priv->mdev->dev->caps.flags &
+			MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
 		buf[3] = mlx4_en_test_registers(priv);
 		buf[4] = mlx4_en_test_loopback(priv);
 	}
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 67a209ba939d..7eb8ba822e97 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -75,7 +75,7 @@ MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (defa
 	} \
 } while (0)
 
-static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
+static void dump_dev_cap_flags(struct mlx4_dev *dev, u64 flags)
 {
 	static const char *fname[] = {
 		[ 0] = "RC transport",
@@ -99,13 +99,19 @@ static void dump_dev_cap_flags(struct mlx4_dev *dev, u32 flags)
 		[21] = "UD multicast support",
 		[24] = "Demand paging support",
 		[25] = "Router support",
-		[30] = "IBoE support"
+		[30] = "IBoE support",
+		[32] = "Unicast loopback support",
+		[38] = "Wake On LAN support",
+		[40] = "UDP RSS support",
+		[41] = "Unicast VEP steering support",
+		[42] = "Multicast VEP steering support",
+		[48] = "Counters support",
 	};
 	int i;
 
 	mlx4_dbg(dev, "DEV_CAP flags:\n");
 	for (i = 0; i < ARRAY_SIZE(fname); ++i)
-		if (fname[i] && (flags & (1 << i)))
+		if (fname[i] && (flags & (1LL << i)))
 			mlx4_dbg(dev, "    %s\n", fname[i]);
 }
 
@@ -142,7 +148,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	struct mlx4_cmd_mailbox *mailbox;
 	u32 *outbox;
 	u8 field;
-	u32 field32;
+	u32 field32, flags, ext_flags;
 	u16 size;
 	u16 stat_rate;
 	int err;
@@ -180,8 +186,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_GID_OFFSET		0x3b
 #define QUERY_DEV_CAP_RATE_SUPPORT_OFFSET	0x3c
 #define QUERY_DEV_CAP_MAX_PKEY_OFFSET		0x3f
-#define QUERY_DEV_CAP_UDP_RSS_OFFSET		0x42
-#define QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET	0x43
+#define QUERY_DEV_CAP_EXT_FLAGS_OFFSET		0x40
 #define QUERY_DEV_CAP_FLAGS_OFFSET		0x44
 #define QUERY_DEV_CAP_RSVD_UAR_OFFSET		0x48
 #define QUERY_DEV_CAP_UAR_SZ_OFFSET		0x49
@@ -199,6 +204,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_MCG_OFFSET		0x63
 #define QUERY_DEV_CAP_RSVD_PD_OFFSET		0x64
 #define QUERY_DEV_CAP_MAX_PD_OFFSET		0x65
+#define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET	0x68
 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET	0x80
 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET	0x82
 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET	0x84
@@ -272,14 +278,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_msg_sz = 1 << (field & 0x1f);
 	MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET);
 	dev_cap->stat_rate_support = stat_rate;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_UDP_RSS_OFFSET);
-	dev_cap->udp_rss = field & 0x1;
-	dev_cap->vep_uc_steering = field & 0x2;
-	dev_cap->vep_mc_steering = field & 0x4;
-	MLX4_GET(field, outbox, QUERY_DEV_CAP_ETH_UC_LOOPBACK_OFFSET);
-	dev_cap->loopback_support = field & 0x1;
-	dev_cap->wol = field & 0x40;
-	MLX4_GET(dev_cap->flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+	MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET);
+	MLX4_GET(flags, outbox, QUERY_DEV_CAP_FLAGS_OFFSET);
+	dev_cap->flags = flags | (u64)ext_flags << 32;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_RSVD_UAR_OFFSET);
 	dev_cap->reserved_uars = field >> 4;
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_UAR_SZ_OFFSET);
@@ -356,6 +357,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 QUERY_DEV_CAP_RSVD_LKEY_OFFSET);
 	MLX4_GET(dev_cap->max_icm_sz, outbox,
 		 QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET);
+	if (dev_cap->flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+		MLX4_GET(dev_cap->max_counters, outbox,
+			 QUERY_DEV_CAP_MAX_COUNTERS_OFFSET);
 
 	if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
 		for (i = 1; i <= dev_cap->num_ports; ++i) {
@@ -449,6 +453,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
 	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
+	mlx4_dbg(dev, "Max counters: %d\n", dev_cap->max_counters);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
 
@@ -781,6 +786,10 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	if (enable_qos)
 		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 2);
 
+	/* enable counters */
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS)
+		*(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cpu_to_be32(1 << 4);
+
 	/* QPC/EEC/CQC/EQC/RDMARC attributes */
 
 	MLX4_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET);
@@ -801,7 +810,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
 	MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
-	if (dev->caps.vep_mc_steering)
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
 		MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET);
 	MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
 
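[Note] The QUERY_DEV_CAP change widens the capability word to 64 bits by reading the extended flags dword and splicing it into the upper half: flags | (u64)ext_flags << 32. Any test of bit 32 or above must then use a 64-bit constant, which is why the loop above shifts 1LL rather than 1 (a 32-bit 1 << i is undefined for i >= 32). A standalone illustration; the bit positions match the dump table above ([38] Wake On LAN, [48] Counters) but the raw flag words are invented:

	#include <stdint.h>
	#include <stdio.h>

	#define CAP_FLAG_WOL      (1ULL << 38)	/* mirrors the Wake On LAN bit */
	#define CAP_FLAG_COUNTERS (1ULL << 48)	/* mirrors the counters bit */

	int main(void)
	{
		uint32_t flags = 0x40000001;		/* lower 32 capability bits */
		uint32_t ext_flags = 0x00010040;	/* bits 32 and up */
		uint64_t caps = flags | (uint64_t)ext_flags << 32;

		/* Without the (uint64_t) cast the shift would discard ext_flags. */
		printf("WOL: %s\n", (caps & CAP_FLAG_WOL) ? "yes" : "no");
		printf("counters: %s\n", (caps & CAP_FLAG_COUNTERS) ? "yes" : "no");
		return 0;
	}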
diff --git a/drivers/net/mlx4/fw.h b/drivers/net/mlx4/fw.h
index 88003ebc6185..1e8ecc3708e2 100644
--- a/drivers/net/mlx4/fw.h
+++ b/drivers/net/mlx4/fw.h
@@ -78,12 +78,7 @@ struct mlx4_dev_cap {
 	u16 wavelength[MLX4_MAX_PORTS + 1];
 	u64 trans_code[MLX4_MAX_PORTS + 1];
 	u16 stat_rate_support;
-	int udp_rss;
-	int loopback_support;
-	int vep_uc_steering;
-	int vep_mc_steering;
-	int wol;
-	u32 flags;
+	u64 flags;
 	int reserved_uars;
 	int uar_size;
 	int min_page_sz;
@@ -116,6 +111,7 @@ struct mlx4_dev_cap {
 	u8 supported_port_types[MLX4_MAX_PORTS + 1];
 	u8 log_max_macs[MLX4_MAX_PORTS + 1];
 	u8 log_max_vlans[MLX4_MAX_PORTS + 1];
+	u32 max_counters;
 };
 
 struct mlx4_adapter {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 0cb0431ee19c..c94b3426d355 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -143,6 +143,7 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
 		if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
 			dev->caps.port_mask |= 1 << (i - 1);
 }
+
 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 {
 	int err;
@@ -226,11 +227,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
 	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
 	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
-	dev->caps.udp_rss	     = dev_cap->udp_rss;
-	dev->caps.loopback_support   = dev_cap->loopback_support;
-	dev->caps.vep_uc_steering    = dev_cap->vep_uc_steering;
-	dev->caps.vep_mc_steering    = dev_cap->vep_mc_steering;
-	dev->caps.wol		     = dev_cap->wol;
 	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;
 
 	dev->caps.log_num_macs	= log_num_mac;
@@ -262,6 +258,8 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 
 	mlx4_set_port_mask(dev);
 
+	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
+
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
 	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
 		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
@@ -839,6 +837,45 @@ err_stop_fw:
 	return err;
 }
 
+static int mlx4_init_counters_table(struct mlx4_dev *dev)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+	int nent;
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return -ENOENT;
+
+	nent = dev->caps.max_counters;
+	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
+}
+
+static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
+{
+	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
+}
+
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
+{
+	struct mlx4_priv *priv = mlx4_priv(dev);
+
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
+		return -ENOENT;
+
+	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
+	if (*idx == -1)
+		return -ENOMEM;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
+
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
+{
+	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
+	return;
+}
+EXPORT_SYMBOL_GPL(mlx4_counter_free);
+
 static int mlx4_setup_hca(struct mlx4_dev *dev)
 {
 	struct mlx4_priv *priv = mlx4_priv(dev);
@@ -943,6 +980,12 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 		goto err_qp_table_free;
 	}
 
+	err = mlx4_init_counters_table(dev);
+	if (err && err != -ENOENT) {
+		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+		goto err_counters_table_free;
+	}
+
 	for (port = 1; port <= dev->caps.num_ports; port++) {
 		enum mlx4_port_type port_type = 0;
 		mlx4_SENSE_PORT(dev, port, &port_type);
@@ -969,6 +1012,9 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
 err_mcg_table_free:
 	mlx4_cleanup_mcg_table(dev);
 
+err_counters_table_free:
+	mlx4_cleanup_counters_table(dev);
+
 err_qp_table_free:
 	mlx4_cleanup_qp_table(dev);
 
@@ -1299,6 +1345,7 @@ err_port:
 	for (--port; port >= 1; --port)
 		mlx4_cleanup_port_info(&priv->port[port]);
 
+	mlx4_cleanup_counters_table(dev);
 	mlx4_cleanup_mcg_table(dev);
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
@@ -1359,6 +1406,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
 		mlx4_CLOSE_PORT(dev, p);
 	}
 
+	mlx4_cleanup_counters_table(dev);
 	mlx4_cleanup_mcg_table(dev);
 	mlx4_cleanup_qp_table(dev);
 	mlx4_cleanup_srq_table(dev);
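
The hunks above add the whole counter lifecycle to mlx4_core: mlx4_init_counters_table() sizes a bitmap from caps.max_counters, mlx4_setup_hca() wires it into init and teardown, and mlx4_counter_alloc()/mlx4_counter_free() are exported for consumers such as mlx4_ib. A minimal sketch of a consumer follows, assuming the caller treats -ENOENT (no counter capability) as "run without a counter"; the helper name is illustrative, not from this patch:

/* Illustrative consumer of the exported API above; not part of the
 * patch.  -ENOENT means the HCA lacks MLX4_DEV_CAP_FLAG_COUNTERS,
 * which a caller typically treats as "no counter", not an error. */
static int example_get_port_counter(struct mlx4_dev *dev, u32 *idx)
{
	int err = mlx4_counter_alloc(dev, idx);

	if (err == -ENOENT) {
		*idx = 0;	/* no counter support on this HCA */
		return 0;
	}
	return err;		/* 0 on success, -ENOMEM if exhausted */
}
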
diff --git a/drivers/net/mlx4/mcg.c b/drivers/net/mlx4/mcg.c
index e63c37d6a115..cd1784593a3c 100644
--- a/drivers/net/mlx4/mcg.c
+++ b/drivers/net/mlx4/mcg.c
@@ -559,7 +559,8 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
 	struct mlx4_mgm *mgm = mgm_mailbox->buf;
 	u8 *mgid;
 	int err;
-	u8 op_mod = (prot == MLX4_PROT_ETH) ? !!(dev->caps.vep_mc_steering) : 0;
+	u8 op_mod = (prot == MLX4_PROT_ETH) ?
+		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev);
 	if (IS_ERR(mailbox))
@@ -834,7 +835,8 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
 
-	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+	if (prot == MLX4_PROT_ETH &&
+	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 	if (prot == MLX4_PROT_ETH)
@@ -853,7 +855,8 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
 
 	steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
 
-	if (prot == MLX4_PROT_ETH && !dev->caps.vep_mc_steering)
+	if (prot == MLX4_PROT_ETH &&
+	    !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 	if (prot == MLX4_PROT_ETH) {
@@ -867,7 +870,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
 
 int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 
@@ -877,7 +880,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
 
 int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 
@@ -887,7 +890,7 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
 
 int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 
@@ -897,7 +900,7 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
 
 int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
 {
-	if (!dev->caps.vep_mc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
 		return 0;
 
 	return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
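
One subtlety in these conversions: caps.flags is now 64 bits wide and MLX4_DEV_CAP_FLAG_VEP_MC_STEER is bit 42, so find_entry() keeps the !! to collapse the mask result to 0/1 before it is narrowed into the u8 op_mod. A standalone illustration of the truncation the !! avoids (assumed values, not driver code):

/* Why the !! matters with 64-bit capability flags (illustrative). */
unsigned long long flags = 1ULL << 42;		/* e.g. VEP_MC_STEER set */
unsigned char wrong = flags & (1ULL << 42);	/* truncated to 0: bit 42 lost */
unsigned char right = !!(flags & (1ULL << 42));	/* 1: collapsed to bool first */
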
diff --git a/drivers/net/mlx4/mlx4.h b/drivers/net/mlx4/mlx4.h
index dd7d745fbab4..a2fcd8402d37 100644
--- a/drivers/net/mlx4/mlx4.h
+++ b/drivers/net/mlx4/mlx4.h
@@ -48,8 +48,8 @@
 #include <linux/mlx4/doorbell.h>
 
 #define DRV_NAME	"mlx4_core"
-#define DRV_VERSION	"0.01"
-#define DRV_RELDATE	"May 1, 2007"
+#define DRV_VERSION	"1.0"
+#define DRV_RELDATE	"July 14, 2011"
 
 enum {
 	MLX4_HCR_BASE		= 0x80680,
@@ -342,6 +342,7 @@ struct mlx4_priv {
 	struct mlx4_srq_table	srq_table;
 	struct mlx4_qp_table	qp_table;
 	struct mlx4_mcg_table	mcg_table;
+	struct mlx4_bitmap	counters_bitmap;
 
 	struct mlx4_catas_err	catas_err;
 
diff --git a/drivers/net/mlx4/port.c b/drivers/net/mlx4/port.c
index 8856659fb43c..1f95afda6841 100644
--- a/drivers/net/mlx4/port.c
+++ b/drivers/net/mlx4/port.c
@@ -146,7 +146,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
 	int i, err = 0;
 	int free = -1;
 
-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
 		err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
 		if (!err) {
 			entry = kmalloc(sizeof *entry, GFP_KERNEL);
@@ -203,7 +203,7 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
 		goto out;
 	}
 
-	if (!dev->caps.vep_uc_steering)
+	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
 		*qpn = info->base_qpn + free;
 	++table->total;
 out:
@@ -243,7 +243,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
 	int index = qpn - info->base_qpn;
 	struct mlx4_mac_entry *entry;
 
-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
 		entry = radix_tree_lookup(&info->mac_tree, qpn);
 		if (entry) {
 			mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
@@ -274,7 +274,7 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
 	struct mlx4_mac_entry *entry;
 	int err;
 
-	if (dev->caps.vep_uc_steering) {
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
 		entry = radix_tree_lookup(&info->mac_tree, qpn);
 		if (!entry)
 			return -EINVAL;
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 9a18667c13cc..b56e4587208d 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -123,6 +123,9 @@ enum {
 	/* debug commands */
 	MLX4_CMD_QUERY_DEBUG_MSG = 0x2a,
 	MLX4_CMD_SET_DEBUG_MSG	 = 0x2b,
+
+	/* statistics commands */
+	MLX4_CMD_QUERY_IF_STAT	 = 0X54,
 };
 
 enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 8985768e2c0d..387329e02303 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -58,22 +58,28 @@ enum {
 };
 
 enum {
-	MLX4_DEV_CAP_FLAG_RC		= 1 <<  0,
-	MLX4_DEV_CAP_FLAG_UC		= 1 <<  1,
-	MLX4_DEV_CAP_FLAG_UD		= 1 <<  2,
-	MLX4_DEV_CAP_FLAG_SRQ		= 1 <<  6,
-	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1 <<  7,
-	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1 <<  8,
-	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1 <<  9,
-	MLX4_DEV_CAP_FLAG_DPDP		= 1 << 12,
-	MLX4_DEV_CAP_FLAG_BLH		= 1 << 15,
-	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1 << 16,
-	MLX4_DEV_CAP_FLAG_APM		= 1 << 17,
-	MLX4_DEV_CAP_FLAG_ATOMIC	= 1 << 18,
-	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1 << 19,
-	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1 << 20,
-	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1 << 21,
-	MLX4_DEV_CAP_FLAG_IBOE		= 1 << 30
+	MLX4_DEV_CAP_FLAG_RC		= 1LL <<  0,
+	MLX4_DEV_CAP_FLAG_UC		= 1LL <<  1,
+	MLX4_DEV_CAP_FLAG_UD		= 1LL <<  2,
+	MLX4_DEV_CAP_FLAG_SRQ		= 1LL <<  6,
+	MLX4_DEV_CAP_FLAG_IPOIB_CSUM	= 1LL <<  7,
+	MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR	= 1LL <<  8,
+	MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR	= 1LL <<  9,
+	MLX4_DEV_CAP_FLAG_DPDP		= 1LL << 12,
+	MLX4_DEV_CAP_FLAG_BLH		= 1LL << 15,
+	MLX4_DEV_CAP_FLAG_MEM_WINDOW	= 1LL << 16,
+	MLX4_DEV_CAP_FLAG_APM		= 1LL << 17,
+	MLX4_DEV_CAP_FLAG_ATOMIC	= 1LL << 18,
+	MLX4_DEV_CAP_FLAG_RAW_MCAST	= 1LL << 19,
+	MLX4_DEV_CAP_FLAG_UD_AV_PORT	= 1LL << 20,
+	MLX4_DEV_CAP_FLAG_UD_MCAST	= 1LL << 21,
+	MLX4_DEV_CAP_FLAG_IBOE		= 1LL << 30,
+	MLX4_DEV_CAP_FLAG_UC_LOOPBACK	= 1LL << 32,
+	MLX4_DEV_CAP_FLAG_WOL		= 1LL << 38,
+	MLX4_DEV_CAP_FLAG_UDP_RSS	= 1LL << 40,
+	MLX4_DEV_CAP_FLAG_VEP_UC_STEER	= 1LL << 41,
+	MLX4_DEV_CAP_FLAG_VEP_MC_STEER	= 1LL << 42,
+	MLX4_DEV_CAP_FLAG_COUNTERS	= 1LL << 48
 };
 
 enum {
@@ -253,15 +259,10 @@ struct mlx4_caps {
 	int			mtt_entry_sz;
 	u32			max_msg_sz;
 	u32			page_size_cap;
-	u32			flags;
+	u64			flags;
 	u32			bmme_flags;
 	u32			reserved_lkey;
 	u16			stat_rate_support;
-	int			udp_rss;
-	int			loopback_support;
-	int			vep_uc_steering;
-	int			vep_mc_steering;
-	int			wol;
 	u8			port_width_cap[MLX4_MAX_PORTS + 1];
 	int			max_gso_sz;
 	int			reserved_qps_cnt[MLX4_NUM_QP_REGION];
@@ -274,6 +275,7 @@ struct mlx4_caps {
 	u8			supported_type[MLX4_MAX_PORTS + 1];
 	u32			port_mask;
 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
+	u32			max_counters;
 };
 
 struct mlx4_buf_list {
@@ -438,6 +440,17 @@ union mlx4_ext_av {
 	struct mlx4_eth_av	eth;
 };
 
+struct mlx4_counter {
+	u8	reserved1[3];
+	u8	counter_mode;
+	__be32	num_ifc;
+	u32	reserved2[2];
+	__be64	rx_frames;
+	__be64	rx_bytes;
+	__be64	tx_frames;
+	__be64	tx_bytes;
+};
+
 struct mlx4_dev {
 	struct pci_dev	       *pdev;
 	unsigned long		flags;
@@ -568,4 +581,7 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
 
+int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx);
+void mlx4_counter_free(struct mlx4_dev *dev, u32 idx);
+
 #endif /* MLX4_DEVICE_H */
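
struct mlx4_counter is the buffer layout the firmware returns for MLX4_CMD_QUERY_IF_STAT, the opcode added to cmd.h above. A sketch of reading one counter through the existing mlx4 command-mailbox interface; passing the counter index as the input modifier and the exact error handling are assumptions for illustration, not code from this series:

/* Sketch: fetch hardware counters for one allocated counter index.
 * mlx4_alloc_cmd_mailbox()/mlx4_cmd_box()/mlx4_free_cmd_mailbox()
 * are the existing mlx4_core command helpers; using `idx` as the
 * input modifier is an assumption for illustration. */
static int example_query_if_stat(struct mlx4_dev *dev, u32 idx,
				 struct mlx4_counter *counter)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(dev, 0, mailbox->dma, idx, 0,
			   MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
	if (!err)
		memcpy(counter, mailbox->buf, sizeof(*counter));

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
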
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 9e9eb21056ca..4001c8249dbb 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -54,7 +54,8 @@ enum mlx4_qp_optpar {
 	MLX4_QP_OPTPAR_RETRY_COUNT		= 1 << 12,
 	MLX4_QP_OPTPAR_RNR_RETRY		= 1 << 13,
 	MLX4_QP_OPTPAR_ACK_TIMEOUT		= 1 << 14,
-	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16
+	MLX4_QP_OPTPAR_SCHED_QUEUE		= 1 << 16,
+	MLX4_QP_OPTPAR_COUNTER_INDEX		= 1 << 20
 };
 
 enum mlx4_qp_state {
@@ -99,7 +100,7 @@ struct mlx4_qp_path {
 	u8			fl;
 	u8			reserved1[2];
 	u8			pkey_index;
-	u8			reserved2;
+	u8			counter_index;
 	u8			grh_mylmc;
 	__be16			rlid;
 	u8			ackto;
@@ -111,8 +112,7 @@ struct mlx4_qp_path {
 	u8			sched_queue;
 	u8			vlan_index;
 	u8			reserved3[2];
-	u8			counter_index;
-	u8			reserved4;
+	u8			reserved4[2];
 	u8			dmac[6];
 };
 
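
The counter_index byte moves from the tail of mlx4_qp_path up next to pkey_index, matching where the firmware actually reads it, and MLX4_QP_OPTPAR_COUNTER_INDEX flags the field as an optional parameter on QP transitions. A hedged sketch of binding a QP to a counter while building its context; the helper is illustrative, and counter_idx would come from mlx4_counter_alloc():

/* Illustrative helper, not from this series: attach a flow counter
 * to a QP context and flag the optional parameter so the firmware
 * picks up the relocated pri_path.counter_index field. */
static void example_bind_qp_counter(struct mlx4_qp_context *context,
				    u32 *optpar, u8 counter_idx)
{
	context->pri_path.counter_index = counter_idx;
	*optpar |= MLX4_QP_OPTPAR_COUNTER_INDEX;
}
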
diff --git a/include/rdma/ib_pma.h b/include/rdma/ib_pma.h
new file mode 100644
index 000000000000..a5889f18807b
--- /dev/null
+++ b/include/rdma/ib_pma.h
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#if !defined(IB_PMA_H)
+#define IB_PMA_H
+
+#include <rdma/ib_mad.h>
+
+/*
+ * PMA class portinfo capability mask bits
+ */
+#define IB_PMA_CLASS_CAP_ALLPORTSELECT  cpu_to_be16(1 << 8)
+#define IB_PMA_CLASS_CAP_EXT_WIDTH      cpu_to_be16(1 << 9)
+#define IB_PMA_CLASS_CAP_XMIT_WAIT      cpu_to_be16(1 << 12)
+
+#define IB_PMA_CLASS_PORT_INFO          cpu_to_be16(0x0001)
+#define IB_PMA_PORT_SAMPLES_CONTROL     cpu_to_be16(0x0010)
+#define IB_PMA_PORT_SAMPLES_RESULT      cpu_to_be16(0x0011)
+#define IB_PMA_PORT_COUNTERS            cpu_to_be16(0x0012)
+#define IB_PMA_PORT_COUNTERS_EXT        cpu_to_be16(0x001D)
+#define IB_PMA_PORT_SAMPLES_RESULT_EXT  cpu_to_be16(0x001E)
+
+struct ib_pma_mad {
+	struct ib_mad_hdr mad_hdr;
+	u8 reserved[40];
+	u8 data[192];
+} __packed;
+
+struct ib_pma_portsamplescontrol {
+	u8 opcode;
+	u8 port_select;
+	u8 tick;
+	u8 counter_width;		/* resv: 7:3, counter width: 2:0 */
+	__be32 counter_mask0_9;		/* 2, 10 3-bit fields */
+	__be16 counter_mask10_14;	/* 1, 5 3-bit fields */
+	u8 sample_mechanisms;
+	u8 sample_status;		/* only lower 2 bits */
+	__be64 option_mask;
+	__be64 vendor_mask;
+	__be32 sample_start;
+	__be32 sample_interval;
+	__be16 tag;
+	__be16 counter_select[15];
+	__be32 reserved1;
+	__be64 samples_only_option_mask;
+	__be32 reserved2[28];
+};
+
+struct ib_pma_portsamplesresult {
+	__be16 tag;
+	__be16 sample_status;		/* only lower 2 bits */
+	__be32 counter[15];
+};
+
+struct ib_pma_portsamplesresult_ext {
+	__be16 tag;
+	__be16 sample_status;		/* only lower 2 bits */
+	__be32 extended_width;		/* only upper 2 bits */
+	__be64 counter[15];
+};
+
+struct ib_pma_portcounters {
+	u8 reserved;
+	u8 port_select;
+	__be16 counter_select;
+	__be16 symbol_error_counter;
+	u8 link_error_recovery_counter;
+	u8 link_downed_counter;
+	__be16 port_rcv_errors;
+	__be16 port_rcv_remphys_errors;
+	__be16 port_rcv_switch_relay_errors;
+	__be16 port_xmit_discards;
+	u8 port_xmit_constraint_errors;
+	u8 port_rcv_constraint_errors;
+	u8 reserved1;
+	u8 link_overrun_errors;	/* LocalLink: 7:4, BufferOverrun: 3:0 */
+	__be16 reserved2;
+	__be16 vl15_dropped;
+	__be32 port_xmit_data;
+	__be32 port_rcv_data;
+	__be32 port_xmit_packets;
+	__be32 port_rcv_packets;
+	__be32 port_xmit_wait;
+} __packed;
+
+
+#define IB_PMA_SEL_SYMBOL_ERROR			cpu_to_be16(0x0001)
+#define IB_PMA_SEL_LINK_ERROR_RECOVERY		cpu_to_be16(0x0002)
+#define IB_PMA_SEL_LINK_DOWNED			cpu_to_be16(0x0004)
+#define IB_PMA_SEL_PORT_RCV_ERRORS		cpu_to_be16(0x0008)
+#define IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS	cpu_to_be16(0x0010)
+#define IB_PMA_SEL_PORT_XMIT_DISCARDS		cpu_to_be16(0x0040)
+#define IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS	cpu_to_be16(0x0200)
+#define IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS	cpu_to_be16(0x0400)
+#define IB_PMA_SEL_PORT_VL15_DROPPED		cpu_to_be16(0x0800)
+#define IB_PMA_SEL_PORT_XMIT_DATA		cpu_to_be16(0x1000)
+#define IB_PMA_SEL_PORT_RCV_DATA		cpu_to_be16(0x2000)
+#define IB_PMA_SEL_PORT_XMIT_PACKETS		cpu_to_be16(0x4000)
+#define IB_PMA_SEL_PORT_RCV_PACKETS		cpu_to_be16(0x8000)
+
+struct ib_pma_portcounters_ext {
+	u8 reserved;
+	u8 port_select;
+	__be16 counter_select;
+	__be32 reserved1;
+	__be64 port_xmit_data;
+	__be64 port_rcv_data;
+	__be64 port_xmit_packets;
+	__be64 port_rcv_packets;
+	__be64 port_unicast_xmit_packets;
+	__be64 port_unicast_rcv_packets;
+	__be64 port_multicast_xmit_packets;
+	__be64 port_multicast_rcv_packets;
+} __packed;
+
+#define IB_PMA_SELX_PORT_XMIT_DATA		cpu_to_be16(0x0001)
+#define IB_PMA_SELX_PORT_RCV_DATA		cpu_to_be16(0x0002)
+#define IB_PMA_SELX_PORT_XMIT_PACKETS		cpu_to_be16(0x0004)
+#define IB_PMA_SELX_PORT_RCV_PACKETS		cpu_to_be16(0x0008)
+#define IB_PMA_SELX_PORT_UNI_XMIT_PACKETS	cpu_to_be16(0x0010)
+#define IB_PMA_SELX_PORT_UNI_RCV_PACKETS	cpu_to_be16(0x0020)
+#define IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS	cpu_to_be16(0x0040)
+#define IB_PMA_SELX_PORT_MULTI_RCV_PACKETS	cpu_to_be16(0x0080)
+
+#endif /* IB_PMA_H */
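
This new header consolidates PMA definitions that ipath, qib, and mthca previously each carried privately (hence the churn in those drivers' mad.c files in the diffstat). A minimal sketch of how a driver's MAD handler might dispatch on the shared attribute IDs; the function and the zeroed counter value are illustrative only:

/* Sketch: dispatch a PerfMgmt GET by attribute ID using the shared
 * ib_pma.h definitions; filling from real hardware stats is elided. */
static int example_process_pma_get(struct ib_pma_mad *pmp)
{
	if (pmp->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS) {
		struct ib_pma_portcounters *pc =
			(struct ib_pma_portcounters *)pmp->data;

		pc->symbol_error_counter = cpu_to_be16(0);	/* from hw */
		return 0;
	}
	return -EOPNOTSUPP;	/* a driver would return a MAD error status */
}
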
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 55cd0a0bc977..bf4306aea169 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -350,7 +350,8 @@ enum ib_event_type {
 	IB_EVENT_SRQ_ERR,
 	IB_EVENT_SRQ_LIMIT_REACHED,
 	IB_EVENT_QP_LAST_WQE_REACHED,
-	IB_EVENT_CLIENT_REREGISTER
+	IB_EVENT_CLIENT_REREGISTER,
+	IB_EVENT_GID_CHANGE,
 };
 
 struct ib_event {
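
IB_EVENT_GID_CHANGE gives IBoE devices a precise way to tell consumers that a port's GID table changed, instead of overloading a port-state event. A sketch of a consumer-side callback registered via ib_register_event_handler(); the cache-refresh helper is hypothetical:

/* Sketch: react to GID table changes; my_refresh_gid_cache() is a
 * hypothetical consumer function, not a kernel API. */
static void example_event_handler(struct ib_event_handler *handler,
				  struct ib_event *event)
{
	if (event->event == IB_EVENT_GID_CHANGE)
		my_refresh_gid_cache(event->device, event->element.port_num);
}
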