-rw-r--r--  drivers/infiniband/core/cma.c            2
-rw-r--r--  drivers/infiniband/core/device.c         2
-rw-r--r--  drivers/infiniband/core/iwcm.c           2
-rw-r--r--  drivers/infiniband/core/nldev.c          2
-rw-r--r--  drivers/infiniband/core/security.c       7
-rw-r--r--  drivers/infiniband/core/uverbs_cmd.c     6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c         5
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c        22
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c         26
-rw-r--r--  drivers/infiniband/ulp/ipoib/ipoib_cm.c  1
10 files changed, 56 insertions, 19 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index f6983357145d..6294a7001d33 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -4458,7 +4458,7 @@ out:
 	return skb->len;
 }
 
-static const struct rdma_nl_cbs cma_cb_table[] = {
+static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = {
 	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats},
 };
 
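The device.c, iwcm.c and nldev.c hunks below make the same change as this one: each netlink callback table is declared with its protocol's *_NUM_OPS bound instead of letting the designated initializers determine the length. As a minimal illustration of why that matters (hypothetical names, not taken from the patch), an unsized designated-initializer array only spans up to the highest initialized index, while an explicitly sized one always covers the full op range:

    /* Hypothetical sketch; OP_* and handle_a are illustration-only names. */
    enum { OP_A, OP_B, OP_C, NUM_OPS };            /* NUM_OPS == 3 */

    struct cb { int (*doit)(void); };

    static int handle_a(void) { return 0; }

    static const struct cb short_tbl[] = {         /* 1 entry: OP_B and OP_C lie past the end */
    	[OP_A] = { .doit = handle_a },
    };

    static const struct cb full_tbl[NUM_OPS] = {   /* 3 entries: uninitialized ops are zeroed */
    	[OP_A] = { .doit = handle_a },
    };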
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index 5e1be4949d5f..30914f3baa5f 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
 }
 EXPORT_SYMBOL(ib_get_net_dev_by_params);
 
-static const struct rdma_nl_cbs ibnl_ls_cb_table[] = {
+static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
 	[RDMA_NL_LS_OP_RESOLVE] = {
 		.doit = ib_nl_handle_resolve_resp,
 		.flags = RDMA_NL_ADMIN_PERM,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index e9e189ec7502..5d676cff41f4 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason)
 }
 EXPORT_SYMBOL(iwcm_reject_msg);
 
-static struct rdma_nl_cbs iwcm_nl_cb_table[] = {
+static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = {
 	[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
 	[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
 	[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 2fae850a3eff..9a05245a1acf 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -303,7 +303,7 @@ out: cb->args[0] = idx;
 	return skb->len;
 }
 
-static const struct rdma_nl_cbs nldev_cb_table[] = {
+static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
 	[RDMA_NLDEV_CMD_GET] = {
 		.doit = nldev_get_doit,
 		.dump = nldev_get_dumpit,
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index a337386652b0..feafdb961c48 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index)
 	if (!rdma_protocol_ib(map->agent.device, map->agent.port_num))
 		return 0;
 
-	if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed)
-		return -EACCES;
+	if (map->agent.qp->qp_type == IB_QPT_SMI) {
+		if (!map->agent.smp_allowed)
+			return -EACCES;
+		return 0;
+	}
 
 	return ib_security_pkey_access(map->agent.device,
 				       map->agent.port_num,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 16d55710b116..d0202bb176a4 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file,
 		goto release_qp;
 	}
 
+	if ((cmd->base.attr_mask & IB_QP_ALT_PATH) &&
+	    !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) {
+		ret = -EINVAL;
+		goto release_qp;
+	}
+
 	attr->qp_state = cmd->base.qp_state;
 	attr->cur_qp_state = cmd->base.cur_qp_state;
 	attr->path_mtu = cmd->base.path_mtu;
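For context, a hedged user-space sketch of the case this hunk starts rejecting: a modify-QP request that sets IB_QP_ALT_PATH with an alternate port number the device does not have. The remaining alternate-path attributes are omitted for brevity, and the port value 255 is only an assumed out-of-range example:

    #include <infiniband/verbs.h>

    /* Illustration only: with the check added above, the kernel fails this
     * request with EINVAL instead of letting the invalid alt_port_num reach
     * the driver's modify_qp handler. */
    static int request_bogus_alt_path(struct ibv_qp *qp)
    {
    	struct ibv_qp_attr attr = {
    		.qp_state     = IBV_QPS_RTS,
    		.alt_port_num = 255,	/* assumed to exceed the device's port count */
    	};

    	return ibv_modify_qp(qp, &attr, IBV_QP_STATE | IBV_QP_ALT_PATH);
    }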
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ea55e95cd2c5..b7bfc536e00f 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -395,6 +395,11 @@ next_cqe:
 
 static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
 {
+	if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) {
+		WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid);
+		return 0;
+	}
+
 	if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
 		return 0;
 
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 5ee7fe433136..38bddd02a943 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	qhp = to_c4iw_qp(ibqp);
 	spin_lock_irqsave(&qhp->lock, flag);
-	if (t4_wq_in_error(&qhp->wq)) {
+
+	/*
+	 * If the qp has been flushed, then just insert a special
+	 * drain cqe.
+	 */
+	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		complete_sq_drain_wr(qhp, wr);
 		return err;
@@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	qhp = to_c4iw_qp(ibqp);
 	spin_lock_irqsave(&qhp->lock, flag);
-	if (t4_wq_in_error(&qhp->wq)) {
+
+	/*
+	 * If the qp has been flushed, then just insert a special
+	 * drain cqe.
+	 */
+	if (qhp->wq.flushed) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
 		complete_rq_drain_wr(qhp, wr);
 		return err;
@@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	spin_unlock_irqrestore(&rchp->lock, flag);
 
 	if (schp == rchp) {
-		if (t4_clear_cq_armed(&rchp->cq) &&
-		    (rq_flushed || sq_flushed)) {
+		if ((rq_flushed || sq_flushed) &&
+		    t4_clear_cq_armed(&rchp->cq)) {
 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
 						   rchp->ibcq.cq_context);
 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 		}
 	} else {
-		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+		if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
 			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
 			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
 						   rchp->ibcq.cq_context);
 			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
 		}
-		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+		if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
 			spin_lock_irqsave(&schp->comp_handler_lock, flag);
 			(*schp->ibcq.comp_handler)(&schp->ibcq,
 						   schp->ibcq.cq_context);
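For reference, a hedged sketch of the consumer-side pattern this drain machinery serves: ULPs typically call the core ib_drain_qp() helper during teardown, and the special drain completions adjusted in these hunks are what let marker work requests complete once the QP has been flushed. Error handling and the surrounding teardown are omitted:

    #include <rdma/ib_verbs.h>

    /* Sketch only: drain both queues so every posted WR has completed
     * before the QP is destroyed. */
    static void sketch_teardown_qp(struct ib_qp *qp)
    {
    	ib_drain_qp(qp);	/* flushes the QP and waits for marker completions */
    	ib_destroy_qp(qp);
    }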
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 013049bcdb53..caf490ab24c8 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 		return (-EOPNOTSUPP);
 	}
 
+	if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 |
+					  MLX4_IB_RX_HASH_DST_IPV4 |
+					  MLX4_IB_RX_HASH_SRC_IPV6 |
+					  MLX4_IB_RX_HASH_DST_IPV6 |
+					  MLX4_IB_RX_HASH_SRC_PORT_TCP |
+					  MLX4_IB_RX_HASH_DST_PORT_TCP |
+					  MLX4_IB_RX_HASH_SRC_PORT_UDP |
+					  MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
+			 ucmd->rx_hash_fields_mask);
+		return (-EOPNOTSUPP);
+	}
+
 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) &&
 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) {
 		rss_ctx->flags = MLX4_RSS_IPV4;
@@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 		return (-EOPNOTSUPP);
 	}
 
-	if (rss_ctx->flags & MLX4_RSS_IPV4) {
+	if (rss_ctx->flags & MLX4_RSS_IPV4)
 		rss_ctx->flags |= MLX4_RSS_UDP_IPV4;
-	} else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+	if (rss_ctx->flags & MLX4_RSS_IPV6)
 		rss_ctx->flags |= MLX4_RSS_UDP_IPV6;
-	} else {
+	if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
 		pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n");
 		return (-EOPNOTSUPP);
 	}
@@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 
 	if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) &&
 	    (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
-		if (rss_ctx->flags & MLX4_RSS_IPV4) {
+		if (rss_ctx->flags & MLX4_RSS_IPV4)
 			rss_ctx->flags |= MLX4_RSS_TCP_IPV4;
-		} else if (rss_ctx->flags & MLX4_RSS_IPV6) {
+		if (rss_ctx->flags & MLX4_RSS_IPV6)
 			rss_ctx->flags |= MLX4_RSS_TCP_IPV6;
-		} else {
+		if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) {
 			pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n");
 			return (-EOPNOTSUPP);
 		}
-
 	} else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) ||
 		   (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) {
 		pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n");
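A hedged note on the behavioral change in the UDP and TCP hunks above: assuming both address families were accepted earlier in the function (so rss_ctx->flags already carries MLX4_RSS_IPV4 and MLX4_RSS_IPV6, which is not shown in this diff), the old else-if chains stopped after the IPv4 case, while the rewritten checks enable port hashing for both families. For example:

    /* Illustration only: a user mask asking for IPv4 + IPv6 plus UDP ports. */
    static u64 example_rx_hash_mask(void)
    {
    	return MLX4_IB_RX_HASH_SRC_IPV4 | MLX4_IB_RX_HASH_DST_IPV4 |
    	       MLX4_IB_RX_HASH_SRC_IPV6 | MLX4_IB_RX_HASH_DST_IPV6 |
    	       MLX4_IB_RX_HASH_SRC_PORT_UDP | MLX4_IB_RX_HASH_DST_PORT_UDP;
    }

    /* With the old else-if chain such a mask yielded only MLX4_RSS_UDP_IPV4;
     * with the rewritten checks rss_ctx->flags gains both MLX4_RSS_UDP_IPV4
     * and MLX4_RSS_UDP_IPV6 (under the assumption stated above). */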
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 87f4bd99cdf7..2c13123bfd69 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
 	noio_flag = memalloc_noio_save();
 	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring));
 	if (!p->tx_ring) {
+		memalloc_noio_restore(noio_flag);
 		ret = -ENOMEM;
 		goto err_tx;
 	}
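The hunk above completes the save/restore pairing for the NOIO allocation scope. As a minimal sketch of the general pattern (the helper name and size argument are illustrative only), every path leaving the scope, including the allocation-failure path, must restore the saved flags:

    #include <linux/sched/mm.h>
    #include <linux/vmalloc.h>

    /* Sketch: allocate under implicit GFP_NOIO semantics and always restore. */
    static void *alloc_ring_noio(size_t bytes)
    {
    	unsigned int noio_flag = memalloc_noio_save();
    	void *ring = vzalloc(bytes);

    	memalloc_noio_restore(noio_flag);
    	return ring;	/* NULL if the allocation failed */
    }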