 drivers/infiniband/hw/amso1100/c2_cq.c             |   4
 drivers/infiniband/hw/cxgb3/cxio_wr.h              |   2
 drivers/infiniband/hw/cxgb3/iwch_provider.c        |  32
 drivers/infiniband/hw/ehca/ehca_classes_pSeries.h  |  28
 drivers/infiniband/hw/ehca/ehca_irq.c              |   9
 drivers/infiniband/hw/ehca/ehca_main.c             |   2
 drivers/infiniband/hw/ehca/ehca_qp.c               | 112
 drivers/infiniband/hw/ehca/hcp_if.c                |   6
 drivers/infiniband/hw/ehca/hcp_if.h                |   2
 drivers/infiniband/hw/ehca/hcp_phyp.c              |  11
 drivers/infiniband/hw/ehca/hcp_phyp.h              |   2
 drivers/infiniband/hw/ehca/ipz_pt_fn.c             |  19
 drivers/infiniband/hw/mlx4/qp.c                    |   4
 drivers/infiniband/hw/nes/nes_hw.c                 |  14
 drivers/net/mlx4/main.c                            |  14
 drivers/net/mlx4/mr.c                              |   6
 drivers/net/mlx4/profile.c                         |   2
 include/linux/mlx4/device.h                        |   1
 include/linux/mlx4/qp.h                            |   1
 19 files changed, 151 insertions(+), 120 deletions(-)
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index bb17cce3cb59..f5c45b194f53 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -133,7 +133,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
        struct c2_qp *qp;
        int is_recv = 0;
 
-       ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+       ce = c2_mq_consume(&cq->mq);
        if (!ce) {
                return -EAGAIN;
        }
@@ -146,7 +146,7 @@ static inline int c2_poll_one(struct c2_dev *c2dev,
        while ((qp =
               (struct c2_qp *) (unsigned long) ce->qp_user_context) == NULL) {
                c2_mq_free(&cq->mq);
-               ce = (struct c2wr_ce *) c2_mq_consume(&cq->mq);
+               ce = c2_mq_consume(&cq->mq);
                if (!ce)
                        return -EAGAIN;
        }
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index ff9be1a13106..32e3b1461d81 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -176,7 +176,7 @@ struct t3_send_wr {
        struct t3_sge sgl[T3_MAX_SGE];  /* 4+ */
 };
 
-#define T3_MAX_FASTREG_DEPTH 24
+#define T3_MAX_FASTREG_DEPTH 10
 #define T3_MAX_FASTREG_FRAG 10
 
 struct t3_fastreg_wr {
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 160ef482712d..e2a63214008a 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -40,6 +40,7 @@
 #include <linux/spinlock.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
+#include <linux/inetdevice.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -1152,12 +1153,39 @@ static int iwch_query_device(struct ib_device *ibdev,
 static int iwch_query_port(struct ib_device *ibdev,
                           u8 port, struct ib_port_attr *props)
 {
+       struct iwch_dev *dev;
+       struct net_device *netdev;
+       struct in_device *inetdev;
+
        PDBG("%s ibdev %p\n", __func__, ibdev);
 
+       dev = to_iwch_dev(ibdev);
+       netdev = dev->rdev.port_info.lldevs[port-1];
+
        memset(props, 0, sizeof(struct ib_port_attr));
        props->max_mtu = IB_MTU_4096;
-       props->active_mtu = IB_MTU_2048;
-       props->state = IB_PORT_ACTIVE;
+       if (netdev->mtu >= 4096)
+               props->active_mtu = IB_MTU_4096;
+       else if (netdev->mtu >= 2048)
+               props->active_mtu = IB_MTU_2048;
+       else if (netdev->mtu >= 1024)
+               props->active_mtu = IB_MTU_1024;
+       else if (netdev->mtu >= 512)
+               props->active_mtu = IB_MTU_512;
+       else
+               props->active_mtu = IB_MTU_256;
+
+       if (!netif_carrier_ok(netdev))
+               props->state = IB_PORT_DOWN;
+       else {
+               inetdev = in_dev_get(netdev);
+               if (inetdev->ifa_list)
+                       props->state = IB_PORT_ACTIVE;
+               else
+                       props->state = IB_PORT_INIT;
+               in_dev_put(inetdev);
+       }
+
        props->port_cap_flags =
                IB_PORT_CM_SUP |
                IB_PORT_SNMP_TUNNEL_SUP |
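
The iwch_query_port() hunk above stops hard-coding the port attributes and instead derives the IB MTU enum from the underlying netdev MTU with a descending threshold cascade, and the port state from carrier plus the presence of an IPv4 address. A minimal standalone sketch of the same MTU mapping (the enum values match <rdma/ib_verbs.h>; the mtu_to_ib_enum() helper name is illustrative, not part of the patch):

```c
#include <stdio.h>

/* IB MTU enum values as defined in <rdma/ib_verbs.h> */
enum ib_mtu { IB_MTU_256 = 1, IB_MTU_512 = 2, IB_MTU_1024 = 3,
              IB_MTU_2048 = 4, IB_MTU_4096 = 5 };

/* Hypothetical helper mirroring the cascade in iwch_query_port():
 * pick the largest IB MTU that fits in the netdev MTU. */
static enum ib_mtu mtu_to_ib_enum(unsigned int netdev_mtu)
{
        if (netdev_mtu >= 4096) return IB_MTU_4096;
        if (netdev_mtu >= 2048) return IB_MTU_2048;
        if (netdev_mtu >= 1024) return IB_MTU_1024;
        if (netdev_mtu >= 512)  return IB_MTU_512;
        return IB_MTU_256;
}

int main(void)
{
        /* a standard 1500-byte Ethernet MTU maps to IB_MTU_1024 */
        printf("%d\n", mtu_to_ib_enum(1500));
        return 0;
}
```
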
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 1798e6466bd0..689c35786dd2 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -165,7 +165,6 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_ALT_P_KEY_IDX EHCA_BMASK_IBM( 7, 7)
 #define MQPCB_MASK_RDMA_ATOMIC_CTRL EHCA_BMASK_IBM( 8, 8)
 #define MQPCB_MASK_QP_STATE EHCA_BMASK_IBM( 9, 9)
-#define MQPCB_QP_STATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_RDMA_NR_ATOMIC_RESP_RES EHCA_BMASK_IBM(11, 11)
 #define MQPCB_MASK_PATH_MIGRATION_STATE EHCA_BMASK_IBM(12, 12)
 #define MQPCB_MASK_RDMA_ATOMIC_OUTST_DEST_QP EHCA_BMASK_IBM(13, 13)
@@ -176,60 +175,33 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_MASK_RETRY_COUNT EHCA_BMASK_IBM(18, 18)
 #define MQPCB_MASK_TIMEOUT EHCA_BMASK_IBM(19, 19)
 #define MQPCB_MASK_PATH_MTU EHCA_BMASK_IBM(20, 20)
-#define MQPCB_PATH_MTU EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE EHCA_BMASK_IBM(21, 21)
-#define MQPCB_MAX_STATIC_RATE EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID EHCA_BMASK_IBM(22, 22)
-#define MQPCB_DLID EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT EHCA_BMASK_IBM(23, 23)
-#define MQPCB_RNR_RETRY_COUNT EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS EHCA_BMASK_IBM(24, 24)
-#define MQPCB_SOURCE_PATH_BITS EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS EHCA_BMASK_IBM(25, 25)
-#define MQPCB_TRAFFIC_CLASS EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT EHCA_BMASK_IBM(26, 26)
-#define MQPCB_HOP_LIMIT EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX EHCA_BMASK_IBM(27, 27)
-#define MQPCB_SOURCE_GID_IDX EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL EHCA_BMASK_IBM(28, 28)
-#define MQPCB_FLOW_LABEL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID EHCA_BMASK_IBM(30, 30)
 #define MQPCB_MASK_SERVICE_LEVEL_AL EHCA_BMASK_IBM(31, 31)
-#define MQPCB_SERVICE_LEVEL_AL EHCA_BMASK_IBM(28, 31)
 #define MQPCB_MASK_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(32, 32)
-#define MQPCB_SEND_GRH_FLAG_AL EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_RETRY_COUNT_AL EHCA_BMASK_IBM(33, 33)
-#define MQPCB_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_TIMEOUT_AL EHCA_BMASK_IBM(34, 34)
-#define MQPCB_TIMEOUT_AL EHCA_BMASK_IBM(27, 31)
 #define MQPCB_MASK_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(35, 35)
-#define MQPCB_MAX_STATIC_RATE_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_DLID_AL EHCA_BMASK_IBM(36, 36)
-#define MQPCB_DLID_AL EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(37, 37)
-#define MQPCB_RNR_RETRY_COUNT_AL EHCA_BMASK_IBM(29, 31)
 #define MQPCB_MASK_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(38, 38)
-#define MQPCB_SOURCE_PATH_BITS_AL EHCA_BMASK_IBM(25, 31)
 #define MQPCB_MASK_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(39, 39)
-#define MQPCB_TRAFFIC_CLASS_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_HOP_LIMIT_AL EHCA_BMASK_IBM(40, 40)
-#define MQPCB_HOP_LIMIT_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(41, 41)
-#define MQPCB_SOURCE_GID_IDX_AL EHCA_BMASK_IBM(24, 31)
 #define MQPCB_MASK_FLOW_LABEL_AL EHCA_BMASK_IBM(42, 42)
-#define MQPCB_FLOW_LABEL_AL EHCA_BMASK_IBM(12, 31)
 #define MQPCB_MASK_DEST_GID_AL EHCA_BMASK_IBM(44, 44)
 #define MQPCB_MASK_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(45, 45)
-#define MQPCB_MAX_NR_OUTST_SEND_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(46, 46)
-#define MQPCB_MAX_NR_OUTST_RECV_WR EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(47, 47)
-#define MQPCB_DISABLE_ETE_CREDIT_CHECK EHCA_BMASK_IBM(31, 31)
-#define MQPCB_QP_NUMBER EHCA_BMASK_IBM( 8, 31)
 #define MQPCB_MASK_QP_ENABLE EHCA_BMASK_IBM(48, 48)
-#define MQPCB_QP_ENABLE EHCA_BMASK_IBM(31, 31)
 #define MQPCB_MASK_CURR_SRQ_LIMIT EHCA_BMASK_IBM(49, 49)
-#define MQPCB_CURR_SRQ_LIMIT EHCA_BMASK_IBM(16, 31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50, 50)
 #define MQPCB_MASK_SHARED_RQ_HNDL EHCA_BMASK_IBM(51, 51)
 
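
The deleted MQPCB_* defines were bit-field positions inside individual control-block words; only the per-attribute mask bits survive because the fields are now passed to firmware as plain values (see the matching ehca_qp.c hunks below). EHCA_BMASK_IBM() uses IBM (big-endian) bit numbering, where bit 0 is the most significant bit of the word. A rough standalone sketch of that convention (the helper body is an approximation for illustration, not copied from ehca_tools.h):

```c
#include <stdio.h>
#include <stdint.h>

/* IBM bit numbering on a 64-bit word: bit 0 is the MSB, bit 63 the
 * LSB. A (from, to) pair describes a field whose machine-order shift
 * is therefore 63 - to. */
static uint64_t ibm_bmask_set(int from, int to, uint64_t value)
{
        int width = to - from + 1;
        int shift = 63 - to;
        uint64_t mask = (width == 64) ? ~0ULL : (1ULL << width) - 1;

        return (value & mask) << shift;
}

int main(void)
{
        /* MQPCB_MASK_QP_STATE is EHCA_BMASK_IBM(9, 9): one flag bit,
         * which lands at machine bit 54 of the update mask. */
        printf("0x%016llx\n", (unsigned long long)ibm_bmask_set(9, 9, 1));
        return 0;
}
```
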
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 99bcbd7ffb0a..4b89b791be6a 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -479,13 +479,13 @@ void ehca_tasklet_neq(unsigned long data)
        struct ehca_eqe *eqe;
        u64 ret;
 
-       eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+       eqe = ehca_poll_eq(shca, &shca->neq);
 
        while (eqe) {
                if (!EHCA_BMASK_GET(NEQE_COMPLETION_EVENT, eqe->entry))
                        parse_ec(shca, eqe->entry);
 
-               eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->neq);
+               eqe = ehca_poll_eq(shca, &shca->neq);
        }
 
        ret = hipz_h_reset_event(shca->ipz_hca_handle,
@@ -572,8 +572,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
        eqe_cnt = 0;
        do {
                u32 token;
-               eqe_cache[eqe_cnt].eqe =
-                       (struct ehca_eqe *)ehca_poll_eq(shca, eq);
+               eqe_cache[eqe_cnt].eqe = ehca_poll_eq(shca, eq);
                if (!eqe_cache[eqe_cnt].eqe)
                        break;
                eqe_value = eqe_cache[eqe_cnt].eqe->entry;
@@ -637,7 +636,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
                goto unlock_irq_spinlock;
        do {
                struct ehca_eqe *eqe;
-               eqe = (struct ehca_eqe *)ehca_poll_eq(shca, &shca->eq);
+               eqe = ehca_poll_eq(shca, &shca->eq);
                if (!eqe)
                        break;
                process_eqe(shca, eqe);
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 368311ce332b..85905ab9391f 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0026"
+#define HCAD_VERSION "0027"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c108159714..0338f1fabe8a 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
                                              ib_device);
        struct ib_ucontext *context = NULL;
        u64 h_ret;
-       int is_llqp = 0, has_srq = 0;
+       int is_llqp = 0, has_srq = 0, is_user = 0;
        int qp_type, max_send_sge, max_recv_sge, ret;
 
        /* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
                }
        }
 
-       if (pd->uobject && udata)
-               context = pd->uobject->context;
-
        my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
        if (!my_qp) {
                ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
                return ERR_PTR(-ENOMEM);
        }
 
+       if (pd->uobject && udata) {
+               is_user = 1;
+               context = pd->uobject->context;
+       }
+
        atomic_set(&my_qp->nr_events, 0);
        init_waitqueue_head(&my_qp->wait_completion);
        spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
                        (parms.squeue.is_small || parms.rqueue.is_small);
        }
 
-       h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+       h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
        if (h_ret != H_SUCCESS) {
                ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
                         h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
                        goto create_qp_exit2;
                }
 
-               my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                       my_qp->ipz_squeue.qe_size;
-               my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                           sizeof(struct ehca_qmap_entry));
-               if (!my_qp->sq_map.map) {
-                       ehca_err(pd->device, "Couldn't allocate squeue "
-                                "map ret=%i", ret);
-                       goto create_qp_exit3;
+               if (!is_user) {
+                       my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+                               my_qp->ipz_squeue.qe_size;
+                       my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+                                                   sizeof(struct ehca_qmap_entry));
+                       if (!my_qp->sq_map.map) {
+                               ehca_err(pd->device, "Couldn't allocate squeue "
+                                        "map ret=%i", ret);
+                               goto create_qp_exit3;
+                       }
+                       INIT_LIST_HEAD(&my_qp->sq_err_node);
+                       /* to avoid the generation of bogus flush CQEs */
+                       reset_queue_map(&my_qp->sq_map);
                }
-               INIT_LIST_HEAD(&my_qp->sq_err_node);
-               /* to avoid the generation of bogus flush CQEs */
-               reset_queue_map(&my_qp->sq_map);
        }
 
        if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
                                 "and pages ret=%i", ret);
                        goto create_qp_exit4;
                }
-
-               my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-                       my_qp->ipz_rqueue.qe_size;
-               my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-                               sizeof(struct ehca_qmap_entry));
-               if (!my_qp->rq_map.map) {
-                       ehca_err(pd->device, "Couldn't allocate squeue "
-                                "map ret=%i", ret);
-                       goto create_qp_exit5;
+               if (!is_user) {
+                       my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+                               my_qp->ipz_rqueue.qe_size;
+                       my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+                                       sizeof(struct ehca_qmap_entry));
+                       if (!my_qp->rq_map.map) {
+                               ehca_err(pd->device, "Couldn't allocate squeue "
+                                        "map ret=%i", ret);
+                               goto create_qp_exit5;
+                       }
+                       INIT_LIST_HEAD(&my_qp->rq_err_node);
+                       /* to avoid the generation of bogus flush CQEs */
+                       reset_queue_map(&my_qp->rq_map);
                }
-               INIT_LIST_HEAD(&my_qp->rq_err_node);
-               /* to avoid the generation of bogus flush CQEs */
-               reset_queue_map(&my_qp->rq_map);
-       } else if (init_attr->srq) {
+       } else if (init_attr->srq && !is_user) {
                /* this is a base QP, use the queue map of the SRQ */
                my_qp->rq_map = my_srq->rq_map;
                INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
        kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-       if (HAS_RQ(my_qp))
+       if (HAS_RQ(my_qp) && !is_user)
                vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
                ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-       if (HAS_SQ(my_qp))
+       if (HAS_SQ(my_qp) && !is_user)
                vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
        u64 update_mask;
        u64 h_ret;
        int bad_wqe_cnt = 0;
+       int is_user = 0;
        int squeue_locked = 0;
        unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                ret = ehca2ib_return_code(h_ret);
                goto modify_qp_exit1;
        }
+       if (ibqp->uobject)
+               is_user = 1;
 
        qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                        goto modify_qp_exit2;
                }
        }
-       if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+       if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+           && !is_user) {
                ret = check_for_left_cqes(my_qp, shca);
                if (ret)
                        goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                ipz_qeit_reset(&my_qp->ipz_rqueue);
                ipz_qeit_reset(&my_qp->ipz_squeue);
 
-               if (qp_cur_state == IB_QPS_ERR) {
+               if (qp_cur_state == IB_QPS_ERR && !is_user) {
                        del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
                        if (HAS_RQ(my_qp))
                                del_from_err_list(my_qp->recv_cq,
                                                  &my_qp->rq_err_node);
                }
-               reset_queue_map(&my_qp->sq_map);
+               if (!is_user)
+                       reset_queue_map(&my_qp->sq_map);
 
-               if (HAS_RQ(my_qp))
+               if (HAS_RQ(my_qp) && !is_user)
                        reset_queue_map(&my_qp->rq_map);
        }
 
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
        qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
        qp_attr->dest_qp_num = qpcb->dest_qp_nr;
 
-       qp_attr->pkey_index =
-               EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
-       qp_attr->port_num =
-               EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+       qp_attr->pkey_index = qpcb->prim_p_key_idx;
+       qp_attr->port_num = qpcb->prim_phys_port;
        qp_attr->timeout = qpcb->timeout;
        qp_attr->retry_cnt = qpcb->retry_count;
        qp_attr->rnr_retry = qpcb->rnr_retry_count;
 
-       qp_attr->alt_pkey_index =
-               EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+       qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
        qp_attr->alt_port_num = qpcb->alt_phys_port;
        qp_attr->alt_timeout = qpcb->timeout_al;
 
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                update_mask |=
                        EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
                        | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-               mqpcb->curr_srq_limit =
-                       EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+               mqpcb->curr_srq_limit = attr->srq_limit;
                mqpcb->qp_aff_asyn_ev_log_reg =
                        EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
        }
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 
        srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
        srq_attr->max_sge = 3;
-       srq_attr->srq_limit = EHCA_BMASK_GET(
-               MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+       srq_attr->srq_limit = qpcb->curr_srq_limit;
 
        if (ehca_debug_level >= 2)
                ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
        int ret;
        u64 h_ret;
        u8 port_num;
+       int is_user = 0;
        enum ib_qp_type qp_type;
        unsigned long flags;
 
        if (uobject) {
+               is_user = 1;
                if (my_qp->mm_count_galpa ||
                    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
                        ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         * SRQs will never get into an error list and do not have a recv_cq,
         * so we need to skip them here.
         */
-       if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+       if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
                del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-       if (HAS_SQ(my_qp))
+       if (HAS_SQ(my_qp) && !is_user)
                del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
        /* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
        if (HAS_RQ(my_qp)) {
                ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-               vfree(my_qp->rq_map.map);
+               if (!is_user)
+                       vfree(my_qp->rq_map.map);
        }
        if (HAS_SQ(my_qp)) {
                ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-               vfree(my_qp->sq_map.map);
+               if (!is_user)
+                       vfree(my_qp->sq_map.map);
        }
        kmem_cache_free(qp_cache, my_qp);
        atomic_dec(&shca->num_qps);
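
The recurring `!is_user` tests above all protect one invariant: the sq_map/rq_map flush-CQE bookkeeping exists only for kernel-space QPs (userspace QPs manage their own queues), so every allocation, error path, reset, and teardown must consult the same flag. A minimal sketch of the pattern under that assumption (struct and helper names are illustrative):

```c
#include <stdlib.h>

struct queue_map { size_t entries; void *map; };

struct qp_ctx {
        int is_user;             /* decided once at create time */
        struct queue_map sq_map; /* kernel-only bookkeeping */
};

/* Allocate the map only for kernel consumers... */
static int qp_ctx_init(struct qp_ctx *qp, int is_user, size_t entries)
{
        qp->is_user = is_user;
        qp->sq_map.map = NULL;
        if (!is_user) {
                qp->sq_map.entries = entries;
                qp->sq_map.map = calloc(entries, sizeof(long));
                if (!qp->sq_map.map)
                        return -1;
        }
        return 0;
}

/* ...and free it under the same condition, so create and destroy
 * can never get out of step about what was allocated. */
static void qp_ctx_destroy(struct qp_ctx *qp)
{
        if (!qp->is_user)
                free(qp->sq_map.map);
}
```
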
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index d0ab0c0d5e91..4d5dc3304d42 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
        param->act_pages = (u32)outs[4];
 
        if (ret == H_SUCCESS)
-               hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+               hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
 
        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 }
 
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms)
+                            struct ehca_alloc_qp_parms *parms, int is_user)
 {
        u64 ret;
        u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
                (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
        if (ret == H_SUCCESS)
-               hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+               hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
 
        if (ret == H_NOT_ENOUGH_RESOURCES)
                ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2c3c6e0ea5c2..39c1c3618ec7 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
  * initialize resources, create empty QPPTs (2 rings).
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                            struct ehca_alloc_qp_parms *parms);
+                            struct ehca_alloc_qp_parms *parms, int is_user);
 
 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                      const u8 port_id,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 214821095cb1..b3e0e72e8a73 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
        return 0;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                    u64 paddr_kernel, u64 paddr_user)
 {
-       int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-       if (ret)
-               return ret;
+       if (!is_user) {
+               int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+               if (ret)
+                       return ret;
+       } else
+               galpas->kernel.fw_handle = 0;
 
        galpas->user.fw_handle = paddr_user;
 
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 5305c2a3ed94..204227d5303a 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
        *(volatile u64 __force *)addr = value;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                    u64 paddr_kernel, u64 paddr_user);
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.c b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
index c3a328465431..1227c593627a 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.c
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.c
@@ -220,10 +220,13 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
        queue->small_page = NULL;
 
        /* allocate queue page pointers */
-       queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+       queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
        if (!queue->queue_pages) {
-               ehca_gen_err("Couldn't allocate queue page list");
-               return 0;
+               queue->queue_pages = vmalloc(nr_of_pages * sizeof(void *));
+               if (!queue->queue_pages) {
+                       ehca_gen_err("Couldn't allocate queue page list");
+                       return 0;
+               }
        }
        memset(queue->queue_pages, 0, nr_of_pages * sizeof(void *));
 
@@ -240,7 +243,10 @@ int ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue,
 ipz_queue_ctor_exit0:
        ehca_gen_err("Couldn't alloc pages queue=%p "
                     "nr_of_pages=%x", queue, nr_of_pages);
-       vfree(queue->queue_pages);
+       if (is_vmalloc_addr(queue->queue_pages))
+               vfree(queue->queue_pages);
+       else
+               kfree(queue->queue_pages);
 
        return 0;
 }
@@ -262,7 +268,10 @@ int ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue)
                        free_page((unsigned long)queue->queue_pages[i]);
        }
 
-       vfree(queue->queue_pages);
+       if (is_vmalloc_addr(queue->queue_pages))
+               vfree(queue->queue_pages);
+       else
+               kfree(queue->queue_pages);
 
        return 1;
 }
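
The ipz_pt_fn.c hunks switch to a common kernel allocation idiom: try the cheaper, physically contiguous kmalloc() first, fall back to vmalloc() when that fails (e.g. under fragmentation), and use is_vmalloc_addr() to pick the matching free routine. A condensed kernel-style sketch of the same idiom (helper names are illustrative; later kernels wrap this pattern as kvmalloc()/kvfree()):

```c
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Try kmalloc first: cheaper and TLB-friendly, but it needs
 * contiguous pages. Fall back to vmalloc for large allocations. */
static void *alloc_page_list(size_t nr_of_pages)
{
        void *p = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);

        if (!p)
                p = vmalloc(nr_of_pages * sizeof(void *));
        return p;
}

/* is_vmalloc_addr() tells the two apart, so one free helper works
 * no matter which allocator succeeded (and both tolerate NULL). */
static void free_page_list(void *p)
{
        if (is_vmalloc_addr(p))
                vfree(p);
        else
                kfree(p);
}
```
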
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 20724aee76f4..c4a02648c8af 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1585,12 +1585,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        break;
 
                case IB_WR_LOCAL_INV:
+                       ctrl->srcrb_flags |=
+                               cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
                        set_local_inv_seg(wqe, wr->ex.invalidate_rkey);
                        wqe  += sizeof (struct mlx4_wqe_local_inval_seg);
                        size += sizeof (struct mlx4_wqe_local_inval_seg) / 16;
                        break;
 
                case IB_WR_FAST_REG_MR:
+                       ctrl->srcrb_flags |=
+                               cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER);
                        set_fmr_seg(wqe, wr);
                        wqe  += sizeof (struct mlx4_wqe_fmr_seg);
                        size += sizeof (struct mlx4_wqe_fmr_seg) / 16;
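
Both new hunks OR the same flag into the WQE control segment: MLX4_WQE_CTRL_STRONG_ORDER asks the HCA to finish earlier work requests before executing a memory-key operation, so a local invalidate or fast register cannot overtake in-flight transfers that still use the old mapping. A small userspace sketch of the flag-accumulation pattern (struct and helper are illustrative; the kernel uses cpu_to_be32() where this sketch uses htonl()):

```c
#include <stdint.h>
#include <arpa/inet.h> /* htonl() stands in for cpu_to_be32() */

enum { MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7 };

struct wqe_ctrl_seg { uint32_t srcrb_flags; /* big-endian on the wire */ };

/* Flags are OR-ed in already byte-swapped form; since OR commutes
 * with the byte swap, each flag can be converted independently,
 * matching how mlx4_ib_post_send() accumulates srcrb_flags. */
static void set_strong_order(struct wqe_ctrl_seg *ctrl)
{
        ctrl->srcrb_flags |= htonl(MLX4_WQE_CTRL_STRONG_ORDER);
}
```
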
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b832a7b814a2..4a84d02ece06 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -667,7 +667,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
        i = 0;
        while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
                mdelay(1);
-       if (i >= 10000) {
+       if (i > 10000) {
                nes_debug(NES_DBG_INIT, "Did not see full soft reset done.\n");
                return 0;
        }
@@ -675,7 +675,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
        i = 0;
        while ((nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS) != 0x80) && i++ < 10000)
                mdelay(1);
-       if (i >= 10000) {
+       if (i > 10000) {
                printk(KERN_ERR PFX "Internal CPU not ready, status = %02X\n",
                       nes_read_indexed(nesdev, NES_IDX_INT_CPU_STATUS));
                return 0;
@@ -701,7 +701,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
        i = 0;
        while (((nes_read32(nesdev->regs+NES_SOFTWARE_RESET) & 0x00000040) == 0) && i++ < 10000)
                mdelay(1);
-       if (i >= 10000) {
+       if (i > 10000) {
                nes_debug(NES_DBG_INIT, "Did not see port soft reset done.\n");
                return 0;
        }
@@ -711,7 +711,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
        while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
                        & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                mdelay(1);
-       if (i >= 5000) {
+       if (i > 5000) {
                nes_debug(NES_DBG_INIT, "Serdes 0 not ready, status=%x\n", u32temp);
                return 0;
        }
@@ -722,7 +722,7 @@ static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_
        while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
                        & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                mdelay(1);
-       if (i >= 5000) {
+       if (i > 5000) {
                nes_debug(NES_DBG_INIT, "Serdes 1 not ready, status=%x\n", u32temp);
                return 0;
        }
@@ -792,7 +792,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
        while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS0)
                        & 0x0000000f)) != 0x0000000f) && i++ < 5000)
                mdelay(1);
-       if (i >= 5000) {
+       if (i > 5000) {
                nes_debug(NES_DBG_PHY, "Init: serdes 0 not ready, status=%x\n", u32temp);
                return 1;
        }
@@ -815,7 +815,7 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
        while (((u32temp = (nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_STATUS1)
                        & 0x0000000f)) != 0x0000000f) && (i++ < 5000))
                mdelay(1);
-       if (i >= 5000) {
+       if (i > 5000) {
                printk("%s: Init: serdes 1 not ready, status=%x\n", __func__, u32temp);
                /* return 1; */
        }
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 30bea9689694..018348c01193 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -100,6 +100,10 @@ module_param_named(use_prio, use_prio, bool, 0444);
 MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
                 "(0/1, default 0)");
 
+static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
+MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)");
+
 int mlx4_check_port_params(struct mlx4_dev *dev,
                           enum mlx4_port_type *port_type)
 {
@@ -203,12 +207,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
        dev->caps.reserved_cqs = dev_cap->reserved_cqs;
        dev->caps.reserved_eqs = dev_cap->reserved_eqs;
+       dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
        dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
-                                              MLX4_MTT_ENTRY_PER_SEG);
+                                              dev->caps.mtts_per_seg);
        dev->caps.reserved_mrws = dev_cap->reserved_mrws;
        dev->caps.reserved_uars = dev_cap->reserved_uars;
        dev->caps.reserved_pds = dev_cap->reserved_pds;
-       dev->caps.mtt_entry_sz = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+       dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
        dev->caps.max_msg_sz = dev_cap->max_msg_sz;
        dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
        dev->caps.flags = dev_cap->flags;
@@ -1304,6 +1309,11 @@ static int __init mlx4_verify_params(void)
                return -1;
        }
 
+       if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 5)) {
+               printk(KERN_WARNING "mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+               return -1;
+       }
+
        return 0;
 }
 
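
The new log_mtts_per_seg module parameter (loaded e.g. as `modprobe mlx4_core log_mtts_per_seg=5`) makes the MTT segment size tunable instead of hard-wiring MLX4_MTT_ENTRY_PER_SEG: each memory-translation segment holds 2^log_mtts_per_seg entries, so a larger value lets one segment map more pages at the cost of coarser-grained allocation. A sketch of the derived sizing, assuming the historical default of 8 entries per segment and an illustrative 8-byte MTT entry:

```c
#include <stdio.h>

int main(void)
{
        int log_mtts_per_seg = 3; /* default: ilog2(8), valid range 1-5 */
        int mtt_entry_sz = 8;     /* bytes per MTT entry (illustrative) */

        /* mirrors mlx4_dev_cap(): caps.mtts_per_seg = 1 << param */
        int mtts_per_seg = 1 << log_mtts_per_seg;
        int seg_sz = mtts_per_seg * mtt_entry_sz;

        /* with 4 KiB pages, one segment then maps 8 * 4 KiB = 32 KiB */
        printf("mtts_per_seg=%d, segment size=%d bytes\n",
               mtts_per_seg, seg_sz);
        return 0;
}
```
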
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c
index 0caf74cae8bc..3b8973d19933 100644
--- a/drivers/net/mlx4/mr.c
+++ b/drivers/net/mlx4/mr.c
@@ -209,7 +209,7 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
        } else
                mtt->page_shift = page_shift;
 
-       for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
+       for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
                ++mtt->order;
 
        mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
@@ -350,7 +350,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
                mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
                                                   MLX4_MPT_PD_FLAG_RAE);
                mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
-                                               MLX4_MTT_ENTRY_PER_SEG);
+                                               dev->caps.mtts_per_seg);
        } else {
                mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
        }
@@ -391,7 +391,7 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
            (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
                return -EINVAL;
 
-       if (start_index & (MLX4_MTT_ENTRY_PER_SEG - 1))
+       if (start_index & (dev->caps.mtts_per_seg - 1))
                return -EINVAL;
 
        mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
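
The mlx4_mtt_init() hunk sizes the buddy allocation against the now-configurable segment capacity: mtt->order counts how many times the per-segment capacity must double before it covers npages, i.e. order = max(0, ceil(log2(npages / mtts_per_seg))). A standalone mirror of that loop:

```c
#include <stdio.h>

/* Mirror of the order loop in mlx4_mtt_init(): count the doublings
 * needed for one buddy block of segments to cover npages entries. */
static int mtt_order(int npages, int mtts_per_seg)
{
        int order = 0, i;

        for (i = mtts_per_seg; i < npages; i <<= 1)
                ++order;
        return order;
}

int main(void)
{
        /* 100 pages, 8 MTTs/segment: 8,16,32,64,128 -> order 4 */
        printf("order = %d\n", mtt_order(100, 8));
        return 0;
}
```

The start_index check in mlx4_write_mtt_chunk() relies on mtts_per_seg being a power of two, which the 1-5 range check on log_mtts_per_seg in main.c guarantees.
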
diff --git a/drivers/net/mlx4/profile.c b/drivers/net/mlx4/profile.c
index cebdf3243ca1..bd22df95adf9 100644
--- a/drivers/net/mlx4/profile.c
+++ b/drivers/net/mlx4/profile.c
@@ -98,7 +98,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
        profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
        profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
-       profile[MLX4_RES_MTT].size = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
+       profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
        profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
 
        profile[MLX4_RES_QP].num = request->num_qp;
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3aff8a6a389e..ce7cc6c7bcbb 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -210,6 +210,7 @@ struct mlx4_caps {
        int num_comp_vectors;
        int num_mpts;
        int num_mtt_segs;
+       int mtts_per_seg;
        int fmr_reserved_mtts;
        int reserved_mtts;
        int reserved_mrws;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index bf8f11982dae..9f29d86e5dc9 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -165,6 +165,7 @@ enum {
        MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
        MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
        MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+       MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
 };
 
 struct mlx4_wqe_ctrl_seg {
