-rw-r--r--  drivers/infiniband/hw/amso1100/c2_cq.c             |   4
-rw-r--r--  drivers/infiniband/hw/cxgb3/cxio_wr.h              |   2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c        |  32
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes_pSeries.h  |  28
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c              |   9
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c             |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c               | 112
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c                |   6
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.h                |   2
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_phyp.c              |  11
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_phyp.h              |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ipz_pt_fn.c             |  19
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c                    |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c            |   2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h            |   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c             |   4
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c           |  17
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c             |  16
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_profile.c        |   4
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c                 |  14
-rw-r--r--  drivers/net/mlx4/eq.c                              |   4
-rw-r--r--  drivers/net/mlx4/main.c                            |  14
-rw-r--r--  drivers/net/mlx4/mr.c                              |   6
-rw-r--r--  drivers/net/mlx4/profile.c                         |   2
-rw-r--r--  include/linux/mlx4/device.h                        |   1
-rw-r--r--  include/linux/mlx4/qp.h                            |   1
26 files changed, 183 insertions(+), 136 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index bb17cce3cb59..f5c45b194f53 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
         struct c2_qp *qp;
         int is_recv = 0;
 
-        ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+        ce = c2_mq_consume(&cq->mq);
         if (!ce) {
                 return -EAGAIN;
         }
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
         while ((qp =
                 (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
                 c2_mq_free(&cq->mq);
-                ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+                ce = c2_mq_consume(&cq->mq);
                 if (!ce)
                         return -EAGAIN;
         }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff9be1a13106..32e3b1461d81 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -176,7 +176,7 @@ struct t3_send_wr {
         struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
 };
 
-#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_DEPTH 10
 #define T3_MAX_FASTREG_FRAG 10
 
 struct t3_fastreg_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 160ef482712d..e2a63214008a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev,
 static int iwch_query_port(struct ib_device *ibdev,
                            u8 port, struct ib_port_attr *props)
 {
+        struct iwch_dev *dev;
+        struct net_device *netdev;
+        struct in_device *inetdev;
+
         PDBG("%s ibdev %p\n", __func__, ibdev);
 
+        dev = to_iwch_dev(ibdev);
+        netdev = dev->rdev.port_info.lldevs[port-1];
+
         memset(props, 0, sizeof(struct ib_port_attr));
         props->max_mtu = IB_MTU_4096;
-        props->active_mtu = IB_MTU_2048;
-        props->state = IB_PORT_ACTIVE;
+        if (netdev->mtu >= 4096)
+                props->active_mtu = IB_MTU_4096;
+        else if (netdev->mtu >= 2048)
+                props->active_mtu = IB_MTU_2048;
+        else if (netdev->mtu >= 1024)
+                props->active_mtu = IB_MTU_1024;
+        else if (netdev->mtu >= 512)
+                props->active_mtu = IB_MTU_512;
+        else
+                props->active_mtu = IB_MTU_256;
+
+        if (!netif_carrier_ok(netdev))
+                props->state = IB_PORT_DOWN;
+        else {
+                inetdev = in_dev_get(netdev);
+                if (inetdev->ifa_list)
+                        props->state = IB_PORT_ACTIVE;
+                else
+                        props->state = IB_PORT_INIT;
+                in_dev_put(inetdev);
+        }
+
         props->port_cap_flags =
                 IB_PORT_CM_SUP |
                 IB_PORT_SNMP_TUNNEL_SUP |
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 1798e6466bd0..689c35786dd2 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
 #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
 #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
 #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
 #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
 #define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
 #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
 #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
 #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
 #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
 #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
 #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
 
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 99bcbd7ffb0a..4b89b791be6a 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
         struct ehca_eqe *eqe;
         u64 ret;
 
-        eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+        eqe = ehca_poll_eq(shca, &shca->neq);
 
         while (eqe) {
                 if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
                         parse_ec(shca, eqe->entry);
 
-                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+                eqe = ehca_poll_eq(shca, &shca->neq);
         }
 
         ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                 eqe_cnt = 0;
                 do {
                         u32 token;
-                        eqe_cache[eqe_cnt].eqe =
-                                (struct ehca_eqe *)ehca_poll_eq(shca, eq);
+                        eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
                         if (!eqe_cache[eqe_cnt].eqe)
                                 break;
                         eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                 goto unlock_irq_spinlock;
         do {
                 struct ehca_eqe *eqe;
-                eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+                eqe = ehca_poll_eq(shca, &shca->eq);
                 if (!eqe)
                         break;
                 process_eqe(shca, eqe);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 368311ce332b..85905ab9391f 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c108159714..0338f1fabe8a 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
                 ib_device);
         struct ib_ucontext *context = NULL;
         u64 h_ret;
-        int is_llqp = 0, has_srq = 0;
+        int is_llqp = 0, has_srq = 0, is_user = 0;
         int qp_type, max_send_sge, max_recv_sge, ret;
 
         /* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
                 }
         }
 
-        if (pd->uobject && udata)
-                context = pd->uobject->context;
-
         my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
         if (!my_qp) {
                 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
                 return ERR_PTR(-ENOMEM);
         }
 
+        if (pd->uobject && udata) {
+                is_user = 1;
+                context = pd->uobject->context;
+        }
+
         atomic_set(&my_qp->nr_events, 0);
         init_waitqueue_head(&my_qp->wait_completion);
         spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
                         (parms.squeue.is_small || parms.rqueue.is_small);
         }
 
-        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
         if (h_ret != H_SUCCESS) {
                 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
                          h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
                         goto create_qp_exit2;
                 }
 
-                my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                        my_qp->ipz_squeue.qe_size;
-                my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                            sizeof(struct ehca_qmap_entry));
-                if (!my_qp->sq_map.map) {
-                        ehca_err(pd->device, "Couldn't allocate squeue "
-                                 "map ret=%i", ret);
-                        goto create_qp_exit3;
+                if (!is_user) {
+                        my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+                                my_qp->ipz_squeue.qe_size;
+                        my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+                                                    sizeof(struct ehca_qmap_entry));
+                        if (!my_qp->sq_map.map) {
+                                ehca_err(pd->device, "Couldn't allocate squeue "
+                                         "map ret=%i", ret);
+                                goto create_qp_exit3;
+                        }
+                        INIT_LIST_HEAD(&my_qp->sq_err_node);
+                        /* to avoid the generation of bogus flush CQEs */
+                        reset_queue_map(&my_qp->sq_map);
                 }
-                INIT_LIST_HEAD(&my_qp->sq_err_node);
-                /* to avoid the generation of bogus flush CQEs */
-                reset_queue_map(&my_qp->sq_map);
         }
 
         if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
792 "and pages ret=%i", ret); 796 "and pages ret=%i", ret);
793 goto create_qp_exit4; 797 goto create_qp_exit4;
794 } 798 }
795 799 if (!is_user) {
796 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / 800 my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
797 my_qp->ipz_rqueue.qe_size; 801 my_qp->ipz_rqueue.qe_size;
798 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * 802 my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
799 sizeof(struct ehca_qmap_entry)); 803 sizeof(struct ehca_qmap_entry));
800 if (!my_qp->rq_map.map) { 804 if (!my_qp->rq_map.map) {
801 ehca_err(pd->device, "Couldn't allocate squeue " 805 ehca_err(pd->device, "Couldn't allocate squeue "
802 "map ret=%i", ret); 806 "map ret=%i", ret);
803 goto create_qp_exit5; 807 goto create_qp_exit5;
808 }
809 INIT_LIST_HEAD(&my_qp->rq_err_node);
810 /* to avoid the generation of bogus flush CQEs */
811 reset_queue_map(&my_qp->rq_map);
804 } 812 }
805 INIT_LIST_HEAD(&my_qp->rq_err_node); 813 } else if (init_attr->srq && !is_user) {
806 /* to avoid the generation of bogus flush CQEs */
807 reset_queue_map(&my_qp->rq_map);
808 } else if (init_attr->srq) {
809 /* this is a base QP, use the queue map of the SRQ */ 814 /* this is a base QP, use the queue map of the SRQ */
810 my_qp->rq_map = my_srq->rq_map; 815 my_qp->rq_map = my_srq->rq_map;
811 INIT_LIST_HEAD(&my_qp->rq_err_node); 816 INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
         kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-        if (HAS_RQ(my_qp))
+        if (HAS_RQ(my_qp) && !is_user)
                 vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         u64 update_mask;
         u64 h_ret;
         int bad_wqe_cnt = 0;
+        int is_user = 0;
         int squeue_locked = 0;
         unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ret = ehca2ib_return_code(h_ret);
                 goto modify_qp_exit1;
         }
+        if (ibqp->uobject)
+                is_user = 1;
 
         qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                         goto modify_qp_exit2;
                 }
         }
-        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+            && !is_user) {
                 ret = check_for_left_cqes(my_qp, shca);
                 if (ret)
                         goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ipz_qeit_reset(&my_qp->ipz_rqueue);
                 ipz_qeit_reset(&my_qp->ipz_squeue);
 
-                if (qp_cur_state == IB_QPS_ERR) {
+                if (qp_cur_state == IB_QPS_ERR && !is_user) {
                         del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
                         if (HAS_RQ(my_qp))
                                 del_from_err_list(my_qp->recv_cq,
                                                   &my_qp->rq_err_node);
                 }
-                reset_queue_map(&my_qp->sq_map);
+                if (!is_user)
+                        reset_queue_map(&my_qp->sq_map);
 
-                if (HAS_RQ(my_qp))
+                if (HAS_RQ(my_qp) && !is_user)
                         reset_queue_map(&my_qp->rq_map);
         }
 
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
         qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
         qp_attr->dest_qp_num = qpcb->dest_qp_nr;
 
-        qp_attr->pkey_index =
-                EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
-        qp_attr->port_num =
-                EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+        qp_attr->pkey_index = qpcb->prim_p_key_idx;
+        qp_attr->port_num = qpcb->prim_phys_port;
         qp_attr->timeout = qpcb->timeout;
         qp_attr->retry_cnt = qpcb->retry_count;
         qp_attr->rnr_retry = qpcb->rnr_retry_count;
 
-        qp_attr->alt_pkey_index =
-                EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+        qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
         qp_attr->alt_port_num = qpcb->alt_phys_port;
         qp_attr->alt_timeout = qpcb->timeout_al;
 
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                 update_mask |=
                         EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
                         | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-                mqpcb->curr_srq_limit =
-                        EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+                mqpcb->curr_srq_limit = attr->srq_limit;
                 mqpcb->qp_aff_asyn_ev_log_reg =
                         EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
         }
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 
         srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
         srq_attr->max_sge = 3;
-        srq_attr->srq_limit = EHCA_BMASK_GET(
-                MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+        srq_attr->srq_limit = qpcb->curr_srq_limit;
 
         if (ehca_debug_level >= 2)
                 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         int ret;
         u64 h_ret;
         u8 port_num;
+        int is_user = 0;
         enum ib_qp_type qp_type;
         unsigned long flags;
 
         if (uobject) {
+                is_user = 1;
                 if (my_qp->mm_count_galpa ||
                     my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
                         ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
          * SRQs will never get into an error list and do not have a recv_cq,
          * so we need to skip them here.
          */
-        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
         /* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
         if (HAS_RQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-                vfree(my_qp->rq_map.map);
+                if (!is_user)
+                        vfree(my_qp->rq_map.map);
         }
         if (HAS_SQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-                vfree(my_qp->sq_map.map);
+                if (!is_user)
+                        vfree(my_qp->sq_map.map);
         }
         kmem_cache_free(qp_cache, my_qp);
         atomic_dec(&shca->num_qps);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index d0ab0c0d5e91..4d5dc3304d42 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
         param->act_pages = (u32)outs[4];
 
         if (ret == H_SUCCESS)
-                hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+                hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
 
         if (ret == H_NOT_ENOUGH_RESOURCES)
                 ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 }
 
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                             struct ehca_alloc_qp_parms *parms)
+                             struct ehca_alloc_qp_parms *parms, int is_user)
 {
         u64 ret;
         u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
                 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
         if (ret == H_SUCCESS)
-                hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+                hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
 
         if (ret == H_NOT_ENOUGH_RESOURCES)
                 ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2c3c6e0ea5c2..39c1c3618ec7 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
  * initialize resources, create empty QPPTs (2 rings).
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                             struct ehca_alloc_qp_parms *parms);
+                             struct ehca_alloc_qp_parms *parms, int is_user);
 
 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                       const u8 port_id,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 214821095cb1..b3e0e72e8a73 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
         return 0;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                     u64 paddr_kernel, u64 paddr_user)
 {
-        int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-        if (ret)
-                return ret;
+        if (!is_user) {
+                int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+                if (ret)
+                        return ret;
+        } else
+                galpas->kernel.fw_handle = 0;
 
         galpas->user.fw_handle = paddr_user;
 
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 5305c2a3ed94..204227d5303a 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
         *(volatile u64 __force *)addr = value;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                     u64 paddr_kernel, u64 paddr_user);
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index c3a328465431..1227c593627a 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -220,10 +220,13 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
         queue->small_page = NULL;
 
         /* allocate queue page pointers */
-        queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+        queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
         if (!queue->queue_pages) {
-                ehca_gen_err("Couldn't allocate queue page list");
-                return 0;
+                queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+                if (!queue->queue_pages) {
+                        ehca_gen_err("Couldn't allocate queue page list");
+                        return 0;
+                }
         }
         memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
         ehca_gen_err("Couldn't alloc pages queue=%p "
                      "nr_of_pages=%x", queue, nr_of_pages);
-        vfree(queue->queue_pages);
+        if (is_vmalloc_addr(queue->queue_pages))
+                vfree(queue->queue_pages);
+        else
+                kfree(queue->queue_pages);
 
         return 0;
 }
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
                 free_page((unsigned long)queue->queue_pages[i]);
         }
 
-        vfree(queue->queue_pages);
+        if (is_vmalloc_addr(queue->queue_pages))
+                vfree(queue->queue_pages);
+        else
+                kfree(queue->queue_pages);
 
         return 1;
 }
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 20724aee76f4..c4a02648c8af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                         break;
 
                 case IB_WR_LOCAL_INV:
+                        ctrl->srcrb_flags |=
+                                cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
                         set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
                         wqe += sizeof (struct mlx4_wqe_local_inval_seg);
                         size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
                         break;
 
                 case IB_WR_FAST_REG_MR:
+                        ctrl->srcrb_flags |=
+                                cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
                         set_fmr_seg(wqe, wr);
                         wqe += sizeof (struct mlx4_wqe_fmr_seg);
                         size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 6d55f9d748f6..8c2ed994d540 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1059,7 +1059,7 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
         MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET);
         if (mthca_is_memfree(dev))
                 dev_lim->reserved_mtts = ALIGN((1 << (field >> 4)) * sizeof(u64),
-                                               MTHCA_MTT_SEG_SIZE) / MTHCA_MTT_SEG_SIZE;
+                                               dev->limits.mtt_seg_size) / dev->limits.mtt_seg_size;
         else
                 dev_lim->reserved_mtts = 1 << (field >> 4);
         MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 252590116df5..9ef611f6dd36 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -159,6 +159,7 @@ struct mthca_limits {
         int reserved_eqs;
         int num_mpts;
         int num_mtt_segs;
+        int mtt_seg_size;
         int fmr_reserved_mtts;
         int reserved_mtts;
         int reserved_mrws;
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 28f0e0c40d7d..90e4e450a120 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -641,9 +641,11 @@ static void mthca_free_irqs(struct mthca_dev *dev)
         if (dev->eq_table.have_irq)
                 free_irq(dev->pdev->irq, dev);
         for (i = 0; i < MTHCA_NUM_EQ; ++i)
-                if (dev->eq_table.eq[i].have_irq)
+                if (dev->eq_table.eq[i].have_irq) {
                         free_irq(dev->eq_table.eq[i].msi_x_vector,
                                  dev->eq_table.eq + i);
+                        dev->eq_table.eq[i].have_irq = 0;
+                }
 }
 
 static int mthca_map_reg(struct mthca_dev *dev,
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 1d83cf7caf38..13da9f1d24c0 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -125,6 +125,10 @@ module_param_named(fmr_reserved_mtts, hca_profile.fmr_reserved_mtts, int, 0444);
 MODULE_PARM_DESC(fmr_reserved_mtts,
                  "number of memory translation table segments reserved for FMR");
 
+static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 static char mthca_version[] __devinitdata =
         DRV_NAME ": Mellanox InfiniBand HCA driver v"
         DRV_VERSION " (" DRV_RELDATE ")\n";
@@ -162,6 +166,7 @@ static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
         int err;
         u8 status;
 
+        mdev->limits.mtt_seg_size = (1 << log_mtts_per_seg) * 8;
         err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
         if (err) {
                 mthca_err(mdev, "QUERY_DEV_LIM command failed, aborting.\n");
@@ -460,11 +465,11 @@ static int mthca_init_icm(struct mthca_dev *mdev,
         }
 
         /* CPU writes to non-reserved MTTs, while HCA might DMA to reserved mtts */
-        mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * MTHCA_MTT_SEG_SIZE,
-                                           dma_get_cache_alignment()) / MTHCA_MTT_SEG_SIZE;
+        mdev->limits.reserved_mtts = ALIGN(mdev->limits.reserved_mtts * mdev->limits.mtt_seg_size,
+                                           dma_get_cache_alignment()) / mdev->limits.mtt_seg_size;
 
         mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
-                                                         MTHCA_MTT_SEG_SIZE,
+                                                         mdev->limits.mtt_seg_size,
                                                          mdev->limits.num_mtt_segs,
                                                          mdev->limits.reserved_mtts,
                                                          1, 0);
@@ -1315,6 +1320,12 @@ static void __init mthca_validate_profile(void)
                 printk(KERN_WARNING PFX "Corrected fmr_reserved_mtts to %d.\n",
                        hca_profile.fmr_reserved_mtts);
         }
+
+        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+                printk(KERN_WARNING PFX "bad log_mtts_per_seg (%d). Using default - %d\n",
+                       log_mtts_per_seg, ilog2(MTHCA_MTT_SEG_SIZE / 8));
+                log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);
+        }
 }
 
 static int __init mthca_init(void)
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 882e6b735915..d606edf10858 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -220,7 +220,7 @@ static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
 
         mtt->buddy = buddy;
         mtt->order = 0;
-        for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
+        for (i = dev->limits.mtt_seg_size / 8; i < size; i <<= 1)
                 ++mtt->order;
 
         mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
@@ -267,7 +267,7 @@ static int __mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
 
         while (list_len > 0) {
                 mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
-                                           mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+                                           mtt->first_seg * dev->limits.mtt_seg_size +
                                            start_index * 8);
                 mtt_entry[1] = 0;
                 for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
@@ -326,7 +326,7 @@ static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
         u64 __iomem *mtts;
         int i;
 
-        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * MTHCA_MTT_SEG_SIZE +
+        mtts = dev->mr_table.tavor_fmr.mtt_base + mtt->first_seg * dev->limits.mtt_seg_size +
                 start_index * sizeof (u64);
         for (i = 0; i < list_len; ++i)
                 mthca_write64_raw(cpu_to_be64(buffer_list[i] | MTHCA_MTT_FLAG_PRESENT),
@@ -345,10 +345,10 @@ static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
         /* For Arbel, all MTTs must fit in the same page. */
         BUG_ON(s / PAGE_SIZE != (s + list_len * sizeof(u64) - 1) / PAGE_SIZE);
         /* Require full segments */
-        BUG_ON(s % MTHCA_MTT_SEG_SIZE);
+        BUG_ON(s % dev->limits.mtt_seg_size);
 
         mtts = mthca_table_find(dev->mr_table.mtt_table, mtt->first_seg +
-                                s / MTHCA_MTT_SEG_SIZE, &dma_handle);
+                                s / dev->limits.mtt_seg_size, &dma_handle);
 
         BUG_ON(!mtts);
 
@@ -479,7 +479,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
         if (mr->mtt)
                 mpt_entry->mtt_seg =
                         cpu_to_be64(dev->mr_table.mtt_base +
-                                    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);
+                                    mr->mtt->first_seg * dev->limits.mtt_seg_size);
 
         if (0) {
                 mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
@@ -626,7 +626,7 @@ int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                 goto err_out_table;
         }
 
-        mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;
+        mtt_seg = mr->mtt->first_seg * dev->limits.mtt_seg_size;
 
         if (mthca_is_memfree(dev)) {
                 mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
@@ -908,7 +908,7 @@ int mthca_init_mr_table(struct mthca_dev *dev)
                            dev->mr_table.mtt_base);
 
                 dev->mr_table.tavor_fmr.mtt_base =
-                        ioremap(addr, mtts * MTHCA_MTT_SEG_SIZE);
+                        ioremap(addr, mtts * dev->limits.mtt_seg_size);
                 if (!dev->mr_table.tavor_fmr.mtt_base) {
                         mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                         err = -ENOMEM;
diff --git a/drivers/infiniband/hw/mthca/mthca_profile.c b/drivers/infiniband/hw/mthca/mthca_profile.c
index d168c2540611..8edb28a9a0e7 100644
--- a/drivers/infiniband/hw/mthca/mthca_profile.c
+++ b/drivers/infiniband/hw/mthca/mthca_profile.c
@@ -94,7 +94,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
         profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
         profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
         profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
-        profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE;
+        profile[MTHCA_RES_MTT].size = dev->limits.mtt_seg_size;
         profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
         profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
         profile[MTHCA_RES_UARC].size = request->uarc_size;
@@ -232,7 +232,7 @@ s64 mthca_make_profile(struct mthca_dev *dev,
                         dev->limits.num_mtt_segs = profile[i].num;
                         dev->mr_table.mtt_base = profile[i].start;
                         init_hca->mtt_base = profile[i].start;
-                        init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7;
+                        init_hca->mtt_seg_sz = ffs(dev->limits.mtt_seg_size) - 7;
                         break;
                 case MTHCA_RES_UAR:
                         dev->limits.num_uars = profile[i].num;
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b832a7b814a2..4a84d02ece06 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         i = 0;
         while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
                 mdelay(1);
-        if (i >= 10000) {
+        if (i > 10000) {
                 nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
                 return 0;
         }
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         i = 0;
         while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
                 mdelay(1);
-        if (i >= 10000) {
+        if (i > 10000) {
                 printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
                        nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
                 return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         i = 0;
         while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
                 mdelay(1);
-        if (i >= 10000) {
+        if (i > 10000) {
                 nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
                 return 0;
         }
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
                         & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                 mdelay(1);
-        if (i >= 5000) {
+        if (i > 5000) {
                 nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
                 return 0;
         }
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
         while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
                         & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                 mdelay(1);
-        if (i >= 5000) {
+        if (i > 5000) {
                 nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
                 return 0;
         }
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
         while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
                         & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                 mdelay(1);
-        if (i >= 5000) {
+        if (i > 5000) {
                 nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
                 return 1;
         }
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
         while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
                         & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
                 mdelay(1);
-        if (i >= 5000) {
+        if (i > 5000) {
                 printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
                 /* return 1; */
         }
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index 8830dcb92ec8..ce064e324200 100644
--- a/drivers/net/mlx4/eq.c
+++ b/drivers/net/mlx4/eq.c
@@ -497,8 +497,10 @@ static void mlx4_free_irqs(struct mlx4_dev *dev)
         if (eq_table->have_irq)
                 free_irq(dev->pdev->irq, dev);
         for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
-                if (eq_table->eq[i].have_irq)
+                if (eq_table->eq[i].have_irq) {
                         free_irq(eq_table->eq[i].irq, eq_table->eq + i);
+                        eq_table->eq[i].have_irq = 0;
+                }
 
         kfree(eq_table->irq_names);
 }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea9689694..018348c01193 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                            "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                            enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
         dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
         dev->caps.reserved_cqs = dev_cap->reserved_cqs;
         dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+        dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
         dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                               MLX4_MTT_ENTRY_PER_SEG);
+                                               dev->caps.mtts_per_seg);
         dev->caps.reserved_mrws = dev_cap->reserved_mrws;
         dev->caps.reserved_uars = dev_cap->reserved_uars;
         dev->caps.reserved_pds = dev_cap->reserved_pds;
-        dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+        dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
         dev->caps.max_msg_sz = dev_cap->max_msg_sz;
         dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
         dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
                 return -1;
         }
 
+        if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+                printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+                return -1;
+        }
+
         return 0;
 }
 
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..3b8973d19933 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
         } else
                 mtt->page_shift = page_shift;
 
-        for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+        for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
                 ++mtt->order;
 
         mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                 mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                    MLX4_MPT_PD_FLAG_RAE);
                 mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
-                                                MLX4_MTT_ENTRY_PER_SEG);
+                                                dev->caps.mtts_per_seg);
         } else {
                 mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
         }
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
             (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
                 return -EINVAL;
 
-        if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+        if (start_index & (dev->caps.mtts_per_seg - 1))
                 return -EINVAL;
 
         mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca1..bd22df95adf9 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
         profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
         profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
         profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
-        profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+        profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
         profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
 
         profile[MLX4_RES_QP].num = request->num_qp;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3aff8a6a389e..ce7cc6c7bcbb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -210,6 +210,7 @@ struct mlx4_caps {
         int num_comp_vectors;
         int num_mpts;
         int num_mtt_segs;
+        int mtts_per_seg;
         int fmr_reserved_mtts;
         int reserved_mtts;
         int reserved_mrws;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index bf8f11982dae..9f29d86e5dc9 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -165,6 +165,7 @@ enum {
         MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
         MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
         MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+        MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
 };
 
 struct mlx4_wqe_ctrl_seg {
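
Illustrative sketch (not part of the patch): the mthca and mlx4 hunks above size an MTT segment as (1 << log_mtts_per_seg) * 8 bytes, i.e. one 8-byte entry per MTT, with log_mtts_per_seg accepted in the range 1-5. A small stand-alone C program showing that arithmetic, under those stated assumptions:

#include <stdio.h>

/* Sketch of the segment sizing used by the new log_mtts_per_seg parameter:
 * mtts_per_seg = 1 << log_mtts_per_seg  (MTT entries per segment)
 * mtt_seg_size = mtts_per_seg * 8       (bytes, one 64-bit entry each)
 */
int main(void)
{
        int log_mtts_per_seg;

        for (log_mtts_per_seg = 1; log_mtts_per_seg <= 5; ++log_mtts_per_seg) {
                int mtts_per_seg = 1 << log_mtts_per_seg;
                int mtt_seg_size = mtts_per_seg * 8;

                printf("log_mtts_per_seg=%d -> %d MTT entries, %d-byte segment\n",
                       log_mtts_per_seg, mtts_per_seg, mtt_seg_size);
        }
        return 0;
}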