about summary refs log tree commit diff stats
path: root/drivers/infiniband/hw/mlx4/qp.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/infiniband/hw/mlx4/qp.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c  28
1 files changed, 14 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 4649d83203df..ceb33327091a 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -261,7 +261,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
261 event.event = IB_EVENT_QP_ACCESS_ERR; 261 event.event = IB_EVENT_QP_ACCESS_ERR;
262 break; 262 break;
263 default: 263 default:
264 printk(KERN_WARNING "mlx4_ib: Unexpected event type %d " 264 pr_warn("Unexpected event type %d "
265 "on QP %06x\n", type, qp->qpn); 265 "on QP %06x\n", type, qp->qpn);
266 return; 266 return;
267 } 267 }
@@ -725,7 +725,7 @@ static void destroy_qp_common(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp,
725 if (qp->state != IB_QPS_RESET) 725 if (qp->state != IB_QPS_RESET)
726 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state), 726 if (mlx4_qp_modify(dev->dev, NULL, to_mlx4_state(qp->state),
727 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp)) 727 MLX4_QP_STATE_RST, NULL, 0, 0, &qp->mqp))
728 printk(KERN_WARNING "mlx4_ib: modify QP %06x to RESET failed.\n", 728 pr_warn("modify QP %06x to RESET failed.\n",
729 qp->mqp.qpn); 729 qp->mqp.qpn);
730 730
731 get_cqs(qp, &send_cq, &recv_cq); 731 get_cqs(qp, &send_cq, &recv_cq);
@@ -958,7 +958,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
958 958
959 if (ah->ah_flags & IB_AH_GRH) { 959 if (ah->ah_flags & IB_AH_GRH) {
960 if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) { 960 if (ah->grh.sgid_index >= dev->dev->caps.gid_table_len[port]) {
961 printk(KERN_ERR "sgid_index (%u) too large. max is %d\n", 961 pr_err("sgid_index (%u) too large. max is %d\n",
962 ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1); 962 ah->grh.sgid_index, dev->dev->caps.gid_table_len[port] - 1);
963 return -1; 963 return -1;
964 } 964 }
@@ -1064,7 +1064,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1064 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12; 1064 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1065 } else if (attr_mask & IB_QP_PATH_MTU) { 1065 } else if (attr_mask & IB_QP_PATH_MTU) {
1066 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) { 1066 if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1067 printk(KERN_ERR "path MTU (%u) is invalid\n", 1067 pr_err("path MTU (%u) is invalid\n",
1068 attr->path_mtu); 1068 attr->path_mtu);
1069 goto out; 1069 goto out;
1070 } 1070 }
@@ -1281,7 +1281,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1281 if (is_qp0(dev, qp)) { 1281 if (is_qp0(dev, qp)) {
1282 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR) 1282 if (cur_state != IB_QPS_RTR && new_state == IB_QPS_RTR)
1283 if (mlx4_INIT_PORT(dev->dev, qp->port)) 1283 if (mlx4_INIT_PORT(dev->dev, qp->port))
1284 printk(KERN_WARNING "INIT_PORT failed for port %d\n", 1284 pr_warn("INIT_PORT failed for port %d\n",
1285 qp->port); 1285 qp->port);
1286 1286
1287 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR && 1287 if (cur_state != IB_QPS_RESET && cur_state != IB_QPS_ERR &&
@@ -1444,6 +1444,9 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1444 1444
1445 if (is_eth) { 1445 if (is_eth) {
1446 u8 *smac; 1446 u8 *smac;
1447 u16 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
1448
1449 mlx->sched_prio = cpu_to_be16(pcp);
1447 1450
1448 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); 1451 memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6);
1449 /* FIXME: cache smac value? */ 1452 /* FIXME: cache smac value? */
@@ -1454,10 +1457,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1454 if (!is_vlan) { 1457 if (!is_vlan) {
1455 sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); 1458 sqp->ud_header.eth.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
1456 } else { 1459 } else {
1457 u16 pcp;
1458
1459 sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE); 1460 sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
1460 pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
1461 sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp); 1461 sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
1462 } 1462 }
1463 } else { 1463 } else {
@@ -1480,16 +1480,16 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
1480 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf); 1480 header_size = ib_ud_header_pack(&sqp->ud_header, sqp->header_buf);
1481 1481
1482 if (0) { 1482 if (0) {
1483 printk(KERN_ERR "built UD header of size %d:\n", header_size); 1483 pr_err("built UD header of size %d:\n", header_size);
1484 for (i = 0; i < header_size / 4; ++i) { 1484 for (i = 0; i < header_size / 4; ++i) {
1485 if (i % 8 == 0) 1485 if (i % 8 == 0)
1486 printk(" [%02x] ", i * 4); 1486 pr_err(" [%02x] ", i * 4);
1487 printk(" %08x", 1487 pr_cont(" %08x",
1488 be32_to_cpu(((__be32 *) sqp->header_buf)[i])); 1488 be32_to_cpu(((__be32 *) sqp->header_buf)[i]));
1489 if ((i + 1) % 8 == 0) 1489 if ((i + 1) % 8 == 0)
1490 printk("\n"); 1490 pr_cont("\n");
1491 } 1491 }
1492 printk("\n"); 1492 pr_err("\n");
1493 } 1493 }
1494 1494
1495 /* 1495 /*