Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c         176
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c           8
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c         34
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c     8
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c          26
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c            5
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c          5
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c           13
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c            8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c            7
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c           24
-rw-r--r--  drivers/infiniband/hw/mlx5/srq.c           6
-rw-r--r--  drivers/infiniband/ulp/isert/ib_isert.c   44
-rw-r--r--  drivers/infiniband/ulp/srpt/ib_srpt.c      8
14 files changed, 210 insertions, 162 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index fb61f6685809..4b8c6116c058 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -472,13 +472,13 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
         skb = get_skb(skb, flowclen, GFP_KERNEL);
         flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
 
-        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
-                                           FW_FLOWC_WR_NPARAMS(8));
-        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
-                                          16)) | FW_WR_FLOWID(ep->hwtid));
+        flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
+                                           FW_FLOWC_WR_NPARAMS_V(8));
+        flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(flowclen,
+                                          16)) | FW_WR_FLOWID_V(ep->hwtid));
 
         flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+        flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
                                             (ep->com.dev->rdev.lldi.pf));
         flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
         flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
@@ -649,31 +649,31 @@ static int send_connect(struct c4iw_ep *ep)
          * remainder will be specified in the rx_data_ack.
          */
         win = ep->rcv_win >> 10;
-        if (win > RCV_BUFSIZ_MASK)
-                win = RCV_BUFSIZ_MASK;
+        if (win > RCV_BUFSIZ_M)
+                win = RCV_BUFSIZ_M;
 
         opt0 = (nocong ? NO_CONG(1) : 0) |
-               KEEP_ALIVE(1) |
+               KEEP_ALIVE_F |
                DELACK(1) |
-               WND_SCALE(wscale) |
-               MSS_IDX(mtu_idx) |
-               L2T_IDX(ep->l2t->idx) |
-               TX_CHAN(ep->tx_chan) |
-               SMAC_SEL(ep->smac_idx) |
+               WND_SCALE_V(wscale) |
+               MSS_IDX_V(mtu_idx) |
+               L2T_IDX_V(ep->l2t->idx) |
+               TX_CHAN_V(ep->tx_chan) |
+               SMAC_SEL_V(ep->smac_idx) |
                DSCP(ep->tos) |
-               ULP_MODE(ULP_MODE_TCPDDP) |
-               RCV_BUFSIZ(win);
-        opt2 = RX_CHANNEL(0) |
+               ULP_MODE_V(ULP_MODE_TCPDDP) |
+               RCV_BUFSIZ_V(win);
+        opt2 = RX_CHANNEL_V(0) |
                CCTRL_ECN(enable_ecn) |
-               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
         if (enable_tcp_timestamps)
                 opt2 |= TSTAMPS_EN(1);
         if (enable_tcp_sack)
                 opt2 |= SACK_EN(1);
         if (wscale && enable_tcp_window_scaling)
-                opt2 |= WND_SCALE_EN(1);
+                opt2 |= WND_SCALE_EN_F;
         if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
-                opt2 |= T5_OPT_2_VALID;
+                opt2 |= T5_OPT_2_VALID_F;
                 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
                 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
         }
@@ -736,7 +736,7 @@ static int send_connect(struct c4iw_ep *ep)
                 t5_req->local_ip = la->sin_addr.s_addr;
                 t5_req->peer_ip = ra->sin_addr.s_addr;
                 t5_req->opt0 = cpu_to_be64(opt0);
-                t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
+                t5_req->params = cpu_to_be64(FILTER_TUPLE_V(
                                         cxgb4_select_ntuple(
                                                 ep->com.dev->rdev.lldi.ports[0],
                                                 ep->l2t)));
@@ -762,7 +762,7 @@ static int send_connect(struct c4iw_ep *ep)
                 t5_req6->peer_ip_lo = *((__be64 *)
                                         (ra6->sin6_addr.s6_addr + 8));
                 t5_req6->opt0 = cpu_to_be64(opt0);
-                t5_req6->params = cpu_to_be64(V_FILTER_TUPLE(
+                t5_req6->params = cpu_to_be64(FILTER_TUPLE_V(
                                         cxgb4_select_ntuple(
                                                 ep->com.dev->rdev.lldi.ports[0],
                                                 ep->l2t)));
@@ -803,16 +803,16 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
         req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
         memset(req, 0, wrlen);
         req->op_to_immdlen = cpu_to_be32(
-                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-                FW_WR_COMPL(1) |
-                FW_WR_IMMDLEN(mpalen));
+                FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+                FW_WR_COMPL_F |
+                FW_WR_IMMDLEN_V(mpalen));
         req->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(ep->hwtid) |
-                FW_WR_LEN16(wrlen >> 4));
+                FW_WR_FLOWID_V(ep->hwtid) |
+                FW_WR_LEN16_V(wrlen >> 4));
         req->plen = cpu_to_be32(mpalen);
         req->tunnel_to_proxy = cpu_to_be32(
-                FW_OFLD_TX_DATA_WR_FLUSH(1) |
-                FW_OFLD_TX_DATA_WR_SHOVE(1));
+                FW_OFLD_TX_DATA_WR_FLUSH_F |
+                FW_OFLD_TX_DATA_WR_SHOVE_F);
 
         mpa = (struct mpa_message *)(req + 1);
         memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
@@ -897,16 +897,16 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
         req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
         memset(req, 0, wrlen);
         req->op_to_immdlen = cpu_to_be32(
-                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-                FW_WR_COMPL(1) |
-                FW_WR_IMMDLEN(mpalen));
+                FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+                FW_WR_COMPL_F |
+                FW_WR_IMMDLEN_V(mpalen));
         req->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(ep->hwtid) |
-                FW_WR_LEN16(wrlen >> 4));
+                FW_WR_FLOWID_V(ep->hwtid) |
+                FW_WR_LEN16_V(wrlen >> 4));
         req->plen = cpu_to_be32(mpalen);
         req->tunnel_to_proxy = cpu_to_be32(
-                FW_OFLD_TX_DATA_WR_FLUSH(1) |
-                FW_OFLD_TX_DATA_WR_SHOVE(1));
+                FW_OFLD_TX_DATA_WR_FLUSH_F |
+                FW_OFLD_TX_DATA_WR_SHOVE_F);
 
         mpa = (struct mpa_message *)(req + 1);
         memset(mpa, 0, sizeof(*mpa));
@@ -977,16 +977,16 @@ static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
         req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
         memset(req, 0, wrlen);
         req->op_to_immdlen = cpu_to_be32(
-                FW_WR_OP(FW_OFLD_TX_DATA_WR) |
-                FW_WR_COMPL(1) |
-                FW_WR_IMMDLEN(mpalen));
+                FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
+                FW_WR_COMPL_F |
+                FW_WR_IMMDLEN_V(mpalen));
         req->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(ep->hwtid) |
-                FW_WR_LEN16(wrlen >> 4));
+                FW_WR_FLOWID_V(ep->hwtid) |
+                FW_WR_LEN16_V(wrlen >> 4));
         req->plen = cpu_to_be32(mpalen);
         req->tunnel_to_proxy = cpu_to_be32(
-                FW_OFLD_TX_DATA_WR_FLUSH(1) |
-                FW_OFLD_TX_DATA_WR_SHOVE(1));
+                FW_OFLD_TX_DATA_WR_FLUSH_F |
+                FW_OFLD_TX_DATA_WR_SHOVE_F);
 
         mpa = (struct mpa_message *)(req + 1);
         memset(mpa, 0, sizeof(*mpa));
@@ -1249,15 +1249,15 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
          * due to the limit in the number of bits in the RCV_BUFSIZ field,
          * then add the overage in to the credits returned.
          */
-        if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
-                credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+        if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
+                credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;
 
         req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
         memset(req, 0, wrlen);
         INIT_TP_WR(req, ep->hwtid);
         OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
                                                     ep->hwtid));
-        req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
+        req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F |
                                        F_RX_DACK_CHANGE |
                                        V_RX_DACK_MODE(dack_mode));
         set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
@@ -1751,7 +1751,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
         req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
         memset(req, 0, sizeof(*req));
         req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
-        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
         req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
                                      ep->com.dev->rdev.lldi.ports[0],
                                      ep->l2t));
@@ -1762,10 +1762,10 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
         req->le.pport = sin->sin_port;
         req->le.u.ipv4.pip = sin->sin_addr.s_addr;
         req->tcb.t_state_to_astid =
-                htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
-                      V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+                htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
+                      FW_OFLD_CONNECTION_WR_ASTID_V(atid));
         req->tcb.cplrxdataack_cplpassacceptrpl =
-                htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+                htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
         req->tcb.tx_max = (__force __be32) jiffies;
         req->tcb.rcv_adv = htons(1);
         best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
@@ -1778,34 +1778,34 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
          * remainder will be specified in the rx_data_ack.
          */
         win = ep->rcv_win >> 10;
-        if (win > RCV_BUFSIZ_MASK)
-                win = RCV_BUFSIZ_MASK;
+        if (win > RCV_BUFSIZ_M)
+                win = RCV_BUFSIZ_M;
 
         req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
                 (nocong ? NO_CONG(1) : 0) |
-                KEEP_ALIVE(1) |
+                KEEP_ALIVE_F |
                 DELACK(1) |
-                WND_SCALE(wscale) |
-                MSS_IDX(mtu_idx) |
-                L2T_IDX(ep->l2t->idx) |
-                TX_CHAN(ep->tx_chan) |
-                SMAC_SEL(ep->smac_idx) |
+                WND_SCALE_V(wscale) |
+                MSS_IDX_V(mtu_idx) |
+                L2T_IDX_V(ep->l2t->idx) |
+                TX_CHAN_V(ep->tx_chan) |
+                SMAC_SEL_V(ep->smac_idx) |
                 DSCP(ep->tos) |
-                ULP_MODE(ULP_MODE_TCPDDP) |
-                RCV_BUFSIZ(win));
+                ULP_MODE_V(ULP_MODE_TCPDDP) |
+                RCV_BUFSIZ_V(win));
         req->tcb.opt2 = (__force __be32) (PACE(1) |
                 TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
-                RX_CHANNEL(0) |
+                RX_CHANNEL_V(0) |
                 CCTRL_ECN(enable_ecn) |
-                RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+                RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
         if (enable_tcp_timestamps)
-                req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+                req->tcb.opt2 |= (__force __be32)TSTAMPS_EN(1);
         if (enable_tcp_sack)
-                req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+                req->tcb.opt2 |= (__force __be32)SACK_EN(1);
         if (wscale && enable_tcp_window_scaling)
-                req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
-        req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
-        req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+                req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
+        req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
+        req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
         set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
         set_bit(ACT_OFLD_CONN, &ep->com.history);
         c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
@@ -2178,28 +2178,28 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
          * remainder will be specified in the rx_data_ack.
          */
         win = ep->rcv_win >> 10;
-        if (win > RCV_BUFSIZ_MASK)
-                win = RCV_BUFSIZ_MASK;
+        if (win > RCV_BUFSIZ_M)
+                win = RCV_BUFSIZ_M;
         opt0 = (nocong ? NO_CONG(1) : 0) |
-               KEEP_ALIVE(1) |
+               KEEP_ALIVE_F |
                DELACK(1) |
-               WND_SCALE(wscale) |
-               MSS_IDX(mtu_idx) |
-               L2T_IDX(ep->l2t->idx) |
-               TX_CHAN(ep->tx_chan) |
-               SMAC_SEL(ep->smac_idx) |
+               WND_SCALE_V(wscale) |
+               MSS_IDX_V(mtu_idx) |
+               L2T_IDX_V(ep->l2t->idx) |
+               TX_CHAN_V(ep->tx_chan) |
+               SMAC_SEL_V(ep->smac_idx) |
                DSCP(ep->tos >> 2) |
-               ULP_MODE(ULP_MODE_TCPDDP) |
-               RCV_BUFSIZ(win);
-        opt2 = RX_CHANNEL(0) |
-               RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
+               ULP_MODE_V(ULP_MODE_TCPDDP) |
+               RCV_BUFSIZ_V(win);
+        opt2 = RX_CHANNEL_V(0) |
+               RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
 
         if (enable_tcp_timestamps && req->tcpopt.tstamp)
                 opt2 |= TSTAMPS_EN(1);
         if (enable_tcp_sack && req->tcpopt.sack)
                 opt2 |= SACK_EN(1);
         if (wscale && enable_tcp_window_scaling)
-                opt2 |= WND_SCALE_EN(1);
+                opt2 |= WND_SCALE_EN_F;
         if (enable_ecn) {
                 const struct tcphdr *tcph;
                 u32 hlen = ntohl(req->hdr_len);
@@ -2211,7 +2211,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
         }
         if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
                 u32 isn = (prandom_u32() & ~7UL) - 1;
-                opt2 |= T5_OPT_2_VALID;
+                opt2 |= T5_OPT_2_VALID_F;
                 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
                 opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
                 rpl5 = (void *)rpl;
@@ -3537,9 +3537,9 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
         req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
         req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
         memset(req, 0, sizeof(*req));
-        req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
-        req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
-        req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+        req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
+        req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
+        req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
         req->le.filter = (__force __be32) filter;
         req->le.lport = lport;
         req->le.pport = rport;
@@ -3548,16 +3548,16 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
         req->tcb.rcv_nxt = htonl(rcv_isn + 1);
         req->tcb.rcv_adv = htons(window);
         req->tcb.t_state_to_astid =
-                htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
-                      V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
-                      V_FW_OFLD_CONNECTION_WR_ASTID(
+                htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
+                      FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
+                      FW_OFLD_CONNECTION_WR_ASTID_V(
                       GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
 
         /*
          * We store the qid in opt2 which will be used by the firmware
          * to send us the wr response.
          */
-        req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+        req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
 
         /*
          * We initialize the MSS index in TCB to 0xF.
@@ -3565,7 +3565,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
          * TCB picks up the correct value. If this was 0
          * TP will ignore any value > 0 for MSS index.
          */
-        req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+        req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
         req->cookie = (unsigned long)skb;
 
         set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
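
Note: the cm.c hunks above are a mechanical conversion of cxgb4's bitfield helpers from the old V_NAME(x)/F_NAME/NAME(x)/NAME_MASK spellings to the NAME_V(x)/NAME_F/NAME_M/NAME_G(x) suffix convention shared with the cxgb4 Ethernet driver. A minimal sketch of such a macro family; the DEMO_* names and shift values are illustrative only, the real definitions live in the t4fw_api.h/t4_msg.h headers:

/* illustrative only: the suffix convention, not a real field layout */
#define DEMO_FIELD_S    24                      /* _S: bit offset of the field */
#define DEMO_FIELD_M    0xff                    /* _M: mask, applied after shifting down */
#define DEMO_FIELD_V(x) ((x) << DEMO_FIELD_S)   /* _V: place a value into the field */
#define DEMO_FIELD_G(x) (((x) >> DEMO_FIELD_S) & DEMO_FIELD_M) /* _G: extract it */

#define DEMO_FLAG_S     21
#define DEMO_FLAG_V(x)  ((x) << DEMO_FLAG_S)
#define DEMO_FLAG_F     DEMO_FLAG_V(1U)         /* _F: shorthand for a one-bit flag */

So FW_WR_COMPL(1) becomes the flag form FW_WR_COMPL_F, RCV_BUFSIZ_MASK becomes RCV_BUFSIZ_M, and the generated code is unchanged.
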
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 0f773e78e080..e9fd3a029296 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -51,9 +51,9 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
         memset(res_wr, 0, wr_len);
         res_wr->op_nres = cpu_to_be32(
-                        FW_WR_OP(FW_RI_RES_WR) |
+                        FW_WR_OP_V(FW_RI_RES_WR) |
                         V_FW_RI_RES_WR_NRES(1) |
-                        FW_WR_COMPL(1));
+                        FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
         res_wr->cookie = (unsigned long) &wr_wait;
         res = res_wr->res;
@@ -121,9 +121,9 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
         res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
         memset(res_wr, 0, wr_len);
         res_wr->op_nres = cpu_to_be32(
-                        FW_WR_OP(FW_RI_RES_WR) |
+                        FW_WR_OP_V(FW_RI_RES_WR) |
                         V_FW_RI_RES_WR_NRES(1) |
-                        FW_WR_COMPL(1));
+                        FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
         res_wr->cookie = (unsigned long) &wr_wait;
         res = res_wr->res;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index ec7a2988a703..0744455cd88b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -74,18 +74,18 @@ static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr,
         req = (struct ulp_mem_io *)__skb_put(skb, wr_len);
         memset(req, 0, wr_len);
         INIT_ULPTX_WR(req, wr_len, 0, 0);
-        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
-                        (wait ? FW_WR_COMPL(1) : 0));
+        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+                        (wait ? FW_WR_COMPL_F : 0));
         req->wr.wr_lo = wait ? (__force __be64)(unsigned long) &wr_wait : 0L;
-        req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
-        req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+        req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
+        req->cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
         req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1));
-        req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5));
+        req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(len>>5));
         req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16));
-        req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr));
+        req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr));
 
         sgl = (struct ulptx_sgl *)(req + 1);
-        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) |
+        sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_DSGL) |
                                     ULPTX_NSGE(1));
         sgl->len0 = cpu_to_be32(len);
         sgl->addr0 = cpu_to_be64(data);
@@ -107,12 +107,12 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
         u8 wr_len, *to_dp, *from_dp;
         int copy_len, num_wqe, i, ret = 0;
         struct c4iw_wr_wait wr_wait;
-        __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE));
+        __be32 cmd = cpu_to_be32(ULPTX_CMD_V(ULP_TX_MEM_WRITE));
 
         if (is_t4(rdev->lldi.adapter_type))
-                cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1));
+                cmd |= cpu_to_be32(ULP_MEMIO_ORDER_F);
         else
-                cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1));
+                cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
 
         addr &= 0x7FFFFFF;
         PDBG("%s addr 0x%x len %u\n", __func__, addr, len);
@@ -135,23 +135,23 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
                 INIT_ULPTX_WR(req, wr_len, 0, 0);
 
                 if (i == (num_wqe-1)) {
-                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) |
-                                                    FW_WR_COMPL(1));
+                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
+                                                    FW_WR_COMPL_F);
                         req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
                 } else
-                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR));
+                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
                 req->wr.wr_mid = cpu_to_be32(
-                                       FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16)));
+                                       FW_WR_LEN16_V(DIV_ROUND_UP(wr_len, 16)));
 
                 req->cmd = cmd;
-                req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(
+                req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN_V(
                                 DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO)));
                 req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr),
                                                       16));
-                req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3));
+                req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR_V(addr + i * 3));
 
                 sc = (struct ulptx_idata *)(req + 1);
-                sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM));
+                sc->cmd_more = cpu_to_be32(ULPTX_CMD_V(ULP_TX_SC_IMM));
                 sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO));
 
                 to_dp = (u8 *)(sc + 1);
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 72e3b69d1b76..66bd6a2ad83b 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -408,10 +408,10 @@ static ssize_t show_fw_ver(struct device *dev, struct device_attribute *attr,
         PDBG("%s dev 0x%p\n", __func__, dev);
 
         return sprintf(buf, "%u.%u.%u.%u\n",
-                       FW_HDR_FW_VER_MAJOR_GET(c4iw_dev->rdev.lldi.fw_vers),
-                       FW_HDR_FW_VER_MINOR_GET(c4iw_dev->rdev.lldi.fw_vers),
-                       FW_HDR_FW_VER_MICRO_GET(c4iw_dev->rdev.lldi.fw_vers),
-                       FW_HDR_FW_VER_BUILD_GET(c4iw_dev->rdev.lldi.fw_vers));
+                       FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
+                       FW_HDR_FW_VER_MINOR_G(c4iw_dev->rdev.lldi.fw_vers),
+                       FW_HDR_FW_VER_MICRO_G(c4iw_dev->rdev.lldi.fw_vers),
+                       FW_HDR_FW_VER_BUILD_G(c4iw_dev->rdev.lldi.fw_vers));
 }
 
 static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 41cd6882b648..2ed3ece2b2ee 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -271,9 +271,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
         res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
         memset(res_wr, 0, wr_len);
         res_wr->op_nres = cpu_to_be32(
-                        FW_WR_OP(FW_RI_RES_WR) |
+                        FW_WR_OP_V(FW_RI_RES_WR) |
                         V_FW_RI_RES_WR_NRES(2) |
-                        FW_WR_COMPL(1));
+                        FW_WR_COMPL_F);
         res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
         res_wr->cookie = (unsigned long) &wr_wait;
         res = res_wr->res;
@@ -1082,10 +1082,10 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 
         wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
         memset(wqe, 0, sizeof *wqe);
-        wqe->op_compl = cpu_to_be32(FW_WR_OP(FW_RI_INIT_WR));
+        wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
         wqe->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(qhp->ep->hwtid) |
-                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+                FW_WR_FLOWID_V(qhp->ep->hwtid) |
+                FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
         wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
         wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
@@ -1204,11 +1204,11 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
         wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
         memset(wqe, 0, sizeof *wqe);
         wqe->op_compl = cpu_to_be32(
-                FW_WR_OP(FW_RI_INIT_WR) |
-                FW_WR_COMPL(1));
+                FW_WR_OP_V(FW_RI_INIT_WR) |
+                FW_WR_COMPL_F);
         wqe->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(ep->hwtid) |
-                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+                FW_WR_FLOWID_V(ep->hwtid) |
+                FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
         wqe->cookie = (unsigned long) &ep->com.wr_wait;
 
         wqe->u.fini.type = FW_RI_TYPE_FINI;
@@ -1273,11 +1273,11 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
         wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
         memset(wqe, 0, sizeof *wqe);
         wqe->op_compl = cpu_to_be32(
-                FW_WR_OP(FW_RI_INIT_WR) |
-                FW_WR_COMPL(1));
+                FW_WR_OP_V(FW_RI_INIT_WR) |
+                FW_WR_COMPL_F);
         wqe->flowid_len16 = cpu_to_be32(
-                FW_WR_FLOWID(qhp->ep->hwtid) |
-                FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
+                FW_WR_FLOWID_V(qhp->ep->hwtid) |
+                FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
         wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 1066eec854a9..a3b70f6c4035 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -233,7 +233,10 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, int entries, int vector
         if (err)
                 goto err_dbmap;
 
-        cq->mcq.comp = mlx4_ib_cq_comp;
+        if (context)
+                cq->mcq.tasklet_ctx.comp = mlx4_ib_cq_comp;
+        else
+                cq->mcq.comp = mlx4_ib_cq_comp;
         cq->mcq.event = mlx4_ib_cq_event;
 
         if (context)
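
Note: this hunk pairs with a mlx4_core change that runs completion callbacks for user-space CQs out of a tasklet instead of directly from EQ interrupt context; kernel CQs keep the direct mcq.comp path. A rough sketch of the deferral idea, with hypothetical demo_* names standing in for the real mlx4_core structures:

#include <linux/interrupt.h>
#include <linux/list.h>

struct demo_cq {
        struct list_head tasklet_list;          /* linked on the EQ's pending list */
        void (*comp)(struct demo_cq *cq);       /* completion callback to defer */
};

struct demo_eq {
        struct list_head process_list;          /* CQs with events to deliver */
        struct tasklet_struct task;
};

/* runs in softirq context, after the hard IRQ handler has returned */
static void demo_cq_tasklet(unsigned long data)
{
        struct demo_eq *eq = (struct demo_eq *)data;
        struct demo_cq *cq, *tmp;

        list_for_each_entry_safe(cq, tmp, &eq->process_list, tasklet_list) {
                list_del_init(&cq->tasklet_list);
                cq->comp(cq);
        }
}

/* setup: tasklet_init(&eq->task, demo_cq_tasklet, (unsigned long)eq);
 * the IRQ handler then queues the CQ and calls tasklet_schedule(). */

This keeps hard-IRQ time short while preserving the per-CQ callback interface.
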
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 8b72cf392b34..57ecc5b204f3 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1975,8 +1975,7 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
             dev->caps.num_ports > dev->caps.comp_pool)
                 return;
 
-        eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
-                                           dev->caps.num_ports);
+        eq_per_port = dev->caps.comp_pool / dev->caps.num_ports;
 
         /* Init eq table */
         added_eqs = 0;
@@ -2228,7 +2227,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
         ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
         err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
                                     MLX4_IB_UC_STEER_QPN_ALIGN,
-                                    &ibdev->steer_qpn_base);
+                                    &ibdev->steer_qpn_base, 0);
         if (err)
                 goto err_counter;
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9c5150c3cb31..cf000b7ad64f 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -802,16 +802,21 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
                         }
                 }
         } else {
-                /* Raw packet QPNs must be aligned to 8 bits. If not, the WQE
-                 * BlueFlame setup flow wrongly causes VLAN insertion. */
+                /* Raw packet QPNs may not have bits 6,7 set in their qp_num;
+                 * otherwise, the WQE BlueFlame setup flow wrongly causes
+                 * VLAN insertion. */
                 if (init_attr->qp_type == IB_QPT_RAW_PACKET)
-                        err = mlx4_qp_reserve_range(dev->dev, 1, 1 << 8, &qpn);
+                        err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
+                                                    (init_attr->cap.max_send_wr ?
+                                                     MLX4_RESERVE_ETH_BF_QP : 0) |
+                                                    (init_attr->cap.max_recv_wr ?
+                                                     MLX4_RESERVE_A0_QP : 0));
                 else
                         if (qp->flags & MLX4_IB_QP_NETIF)
                                 err = mlx4_ib_steer_qp_alloc(dev, 1, &qpn);
                         else
                                 err = mlx4_qp_reserve_range(dev->dev, 1, 1,
                                                             &qpn, 0);
-                                                            &qpn);
+                                                            &qpn, 0);
                 if (err)
                         goto err_proxy;
         }
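
Note: mlx4_qp_reserve_range() grows a flags argument here; existing callers pass 0, and the raw packet path uses it to request special QPN ranges instead of the old over-aligned (1 << 8) allocation. A hedged sketch of the updated call as used above; the flag names come from the diff itself, the exact prototype is inferred:

/* inferred prototype; the flags are defined by mlx4_core */
int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
                          int *base, u8 flags);

/* a raw packet QP asks for a BlueFlame-capable range only when it will
 * actually post sends, and an A0 range only when it posts receives */
err = mlx4_qp_reserve_range(dev->dev, 1, 1, &qpn,
                            (init_attr->cap.max_send_wr ?
                             MLX4_RESERVE_ETH_BF_QP : 0) |
                            (init_attr->cap.max_recv_wr ?
                             MLX4_RESERVE_A0_QP : 0));

Expressing the constraint through reservation flags lets the core allocator pick a suitable QPN rather than forcing callers to encode hardware quirks in the alignment.
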
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 10cfce5119a9..c463e7bba5f4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -805,14 +805,14 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
         }
 
 
-        mlx5_vfree(cqb);
+        kvfree(cqb);
         return &cq->ibcq;
 
 err_cmd:
         mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
-        mlx5_vfree(cqb);
+        kvfree(cqb);
         if (context)
                 destroy_cq_user(cq, context);
         else
@@ -1159,11 +1159,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
         }
         mutex_unlock(&cq->resize_mutex);
 
-        mlx5_vfree(in);
+        kvfree(in);
         return 0;
 
 ex_alloc:
-        mlx5_vfree(in);
+        kvfree(in);
 
 ex_resize:
         if (udata)
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 8ee7cb46e059..5a80dd993761 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -159,6 +159,9 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                                             sizeof(*in), reg_mr_callback,
                                             mr, &mr->out);
                 if (err) {
+                        spin_lock_irq(&ent->lock);
+                        ent->pending--;
+                        spin_unlock_irq(&ent->lock);
                         mlx5_ib_warn(dev, "create mkey failed %d\n", err);
                         kfree(mr);
                         break;
@@ -853,14 +856,14 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                 goto err_2;
         }
         mr->umem = umem;
-        mlx5_vfree(in);
+        kvfree(in);
 
         mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);
 
         return mr;
 
 err_2:
-        mlx5_vfree(in);
+        kvfree(in);
 
 err_1:
         kfree(mr);
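
Note: the add_keys() hunk fixes an accounting leak: ent->pending is incremented under ent->lock before the asynchronous mkey create is issued, so a failed submission must roll the counter back the same way. The shape of the pattern, sketched with the names used in the hunk (issue_async_create() stands in for the real mlx5 core call):

/* optimistic increment before going async */
spin_lock_irq(&ent->lock);
ent->pending++;
spin_unlock_irq(&ent->lock);

err = issue_async_create(...);
if (err) {
        /* undo the increment, or the cache entry permanently counts
         * a request whose completion callback will never run */
        spin_lock_irq(&ent->lock);
        ent->pending--;
        spin_unlock_irq(&ent->lock);
}
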
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index e261a53f9a02..1cae1c7132b4 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -647,7 +647,7 @@ err_unmap:
         mlx5_ib_db_unmap_user(context, &qp->db);
 
 err_free:
-        mlx5_vfree(*in);
+        kvfree(*in);
 
 err_umem:
         if (qp->umem)
@@ -761,7 +761,7 @@ err_wrid:
         kfree(qp->rq.wrid);
 
 err_free:
-        mlx5_vfree(*in);
+        kvfree(*in);
 
 err_buf:
         mlx5_buf_free(dev->mdev, &qp->buf);
@@ -971,7 +971,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                 goto err_create;
         }
 
-        mlx5_vfree(in);
+        kvfree(in);
         /* Hardware wants QPN written in big-endian order (after
          * shifting) for send doorbell. Precompute this value to save
          * a little bit when posting sends.
@@ -988,7 +988,7 @@ err_create:
         else if (qp->create_type == MLX5_QP_KERNEL)
                 destroy_qp_kernel(dev, qp);
 
-        mlx5_vfree(in);
+        kvfree(in);
         return err;
 }
 
@@ -1011,9 +1011,14 @@ static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv
                         }
                 } else {
                         spin_lock_irq(&send_cq->lock);
+                        __acquire(&recv_cq->lock);
                 }
         } else if (recv_cq) {
                 spin_lock_irq(&recv_cq->lock);
+                __acquire(&send_cq->lock);
+        } else {
+                __acquire(&send_cq->lock);
+                __acquire(&recv_cq->lock);
         }
 }
 
@@ -1033,10 +1038,15 @@ static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *re
                                 spin_unlock_irq(&recv_cq->lock);
                         }
                 } else {
+                        __release(&recv_cq->lock);
                         spin_unlock_irq(&send_cq->lock);
                 }
         } else if (recv_cq) {
+                __release(&send_cq->lock);
                 spin_unlock_irq(&recv_cq->lock);
+        } else {
+                __release(&recv_cq->lock);
+                __release(&send_cq->lock);
         }
 }
 
@@ -2411,7 +2421,7 @@ static u8 get_fence(u8 fence, struct ib_send_wr *wr)
 
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
                      struct mlx5_wqe_ctrl_seg **ctrl,
-                     struct ib_send_wr *wr, int *idx,
+                     struct ib_send_wr *wr, unsigned *idx,
                      int *size, int nreq)
 {
         int err = 0;
@@ -2737,6 +2747,8 @@ out:
 
         if (bf->need_lock)
                 spin_lock(&bf->lock);
+        else
+                __acquire(&bf->lock);
 
         /* TBD enable WC */
         if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
@@ -2753,6 +2765,8 @@ out:
                 bf->offset ^= bf->buf_size;
                 if (bf->need_lock)
                         spin_unlock(&bf->lock);
+                else
+                        __release(&bf->lock);
         }
 
         spin_unlock_irqrestore(&qp->sq.lock, flags);
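
Note: the __acquire()/__release() calls added in this file are sparse context annotations, not real locking. When a lock is taken on only one branch, the no-op annotation on the other branch keeps sparse's acquire/release balance checking quiet. A minimal sketch of the pattern, assuming the stock <linux/compiler.h> definitions:

static void demo_cond_lock(spinlock_t *lock, bool need_lock)
{
        if (need_lock)
                spin_lock(lock);
        else
                __acquire(lock);        /* no-op at runtime; +1 lock context for sparse */
}

static void demo_cond_unlock(spinlock_t *lock, bool need_lock)
{
        if (need_lock)
                spin_unlock(lock);
        else
                __release(lock);        /* matching -1 so both paths balance */
}

The same idea covers the CQ lock/unlock helpers above, where one of send_cq/recv_cq may be NULL and only the non-NULL one is actually locked.
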
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 97cc1baaa8e3..41fec66217dd 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -141,7 +141,7 @@ static int create_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq,
         return 0;
 
 err_in:
-        mlx5_vfree(*in);
+        kvfree(*in);
 
 err_umem:
         ib_umem_release(srq->umem);
@@ -209,7 +209,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
         return 0;
 
 err_in:
-        mlx5_vfree(*in);
+        kvfree(*in);
 
 err_buf:
         mlx5_buf_free(dev->mdev, &srq->buf);
@@ -306,7 +306,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
         in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
         in->ctx.db_record = cpu_to_be64(srq->db.dma);
         err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
-        mlx5_vfree(in);
+        kvfree(in);
         if (err) {
                 mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
                 goto err_usr_kern_srq;
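
Note: the mlx5_vfree() to kvfree() conversions across the mlx5 files work because kvfree() accepts memory obtained from either kmalloc() or vmalloc(), which is exactly the dual-path allocation the driver-private wrapper existed to handle. A small sketch of the pairing, under that assumption:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *demo_alloc(size_t size)
{
        /* try a cheap contiguous allocation first, then fall back to
         * vmalloc for buffers too large for the page allocator */
        void *buf = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);

        return buf ? buf : vzalloc(size);
}

static void demo_free(void *buf)
{
        kvfree(buf);    /* correct for both the kzalloc and vzalloc cases */
}
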
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 3effa931fce2..10641b7816f4 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
         attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
         /*
          * FIXME: Use devattr.max_sge - 2 for max_send_sge as
-         * work-around for RDMA_READ..
+         * work-around for RDMA_READs with ConnectX-2.
+         *
+         * Also, still make sure to have at least two SGEs for
+         * outgoing control PDU responses.
          */
-        attr.cap.max_send_sge = device->dev_attr.max_sge - 2;
+        attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
         isert_conn->max_sge = attr.cap.max_send_sge;
 
         attr.cap.max_recv_sge = 1;
@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device)
         struct isert_cq_desc *cq_desc;
         struct ib_device_attr *dev_attr;
         int ret = 0, i, j;
+        int max_rx_cqe, max_tx_cqe;
 
         dev_attr = &device->dev_attr;
         ret = isert_query_device(ib_dev, dev_attr);
         if (ret)
                 return ret;
 
+        max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
+        max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+
         /* asign function handlers */
         if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
             dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                 isert_cq_rx_callback,
                                                 isert_cq_event_callback,
                                                 (void *)&cq_desc[i],
-                                                ISER_MAX_RX_CQ_LEN, i);
+                                                max_rx_cqe, i);
                 if (IS_ERR(device->dev_rx_cq[i])) {
                         ret = PTR_ERR(device->dev_rx_cq[i]);
                         device->dev_rx_cq[i] = NULL;
@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device)
                                                 isert_cq_tx_callback,
                                                 isert_cq_event_callback,
                                                 (void *)&cq_desc[i],
-                                                ISER_MAX_TX_CQ_LEN, i);
+                                                max_tx_cqe, i);
                 if (IS_ERR(device->dev_tx_cq[i])) {
                         ret = PTR_ERR(device->dev_tx_cq[i]);
                         device->dev_tx_cq[i] = NULL;
@@ -803,14 +810,25 @@ wake_up:
         complete(&isert_conn->conn_wait);
 }
 
-static void
+static int
 isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
 {
-        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
+        struct isert_conn *isert_conn;
+
+        if (!cma_id->qp) {
+                struct isert_np *isert_np = cma_id->context;
+
+                isert_np->np_cm_id = NULL;
+                return -1;
+        }
+
+        isert_conn = (struct isert_conn *)cma_id->context;
 
         isert_conn->disconnect = disconnect;
         INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
         schedule_work(&isert_conn->conn_logout_work);
+
+        return 0;
 }
 
 static int
@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
         switch (event->event) {
         case RDMA_CM_EVENT_CONNECT_REQUEST:
                 ret = isert_connect_request(cma_id, event);
+                if (ret)
+                        pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
+                               event->event, ret);
                 break;
         case RDMA_CM_EVENT_ESTABLISHED:
                 isert_connected_handler(cma_id);
@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
         case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
                 disconnect = true;
         case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-                isert_disconnected_handler(cma_id, disconnect);
+                ret = isert_disconnected_handler(cma_id, disconnect);
                 break;
         case RDMA_CM_EVENT_CONNECT_ERROR:
         default:
@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                 break;
         }
 
-        if (ret != 0) {
-                pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-                       event->event, ret);
-                dump_stack();
-        }
-
         return ret;
 }
 
@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np)
 {
         struct isert_np *isert_np = (struct isert_np *)np->np_context;
 
-        rdma_destroy_id(isert_np->np_cm_id);
+        if (isert_np->np_cm_id)
+                rdma_destroy_id(isert_np->np_cm_id);
 
         np->np_context = NULL;
         kfree(isert_np);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 3a0ca61b02c8..eb694ddad79f 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
         if (!qp_init)
                 goto out;
 
+retry:
         ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
                               ch->rq_size + srp_sq_size, 0);
         if (IS_ERR(ch->cq)) {
@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
         ch->qp = ib_create_qp(sdev->pd, qp_init);
         if (IS_ERR(ch->qp)) {
                 ret = PTR_ERR(ch->qp);
+                if (ret == -ENOMEM) {
+                        srp_sq_size /= 2;
+                        if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
+                                ib_destroy_cq(ch->cq);
+                                goto retry;
+                        }
+                }
                 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
                 goto err_destroy_cq;
         }