Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/hfi1/chip.c           |  7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c             |  7
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.h             |  3
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c  | 86
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_main.c   |  3
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c             |  6
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c           |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c            |  2
-rw-r--r--  drivers/infiniband/hw/mlx4/mlx4_ib.h        |  1
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c             | 40
-rw-r--r--  drivers/infiniband/hw/mlx4/srq.c            |  8
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c             | 36
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c          |  4
-rw-r--r--  drivers/infiniband/hw/qib/qib_qp.c          | 15
-rw-r--r--  drivers/infiniband/hw/qib/qib_verbs.h       |  4
15 files changed, 123 insertions(+), 101 deletions(-)
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 2ba00b89df6a..94b54850ec75 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr)
 	/* clear from the handled mask of the general interrupt */
 	m = isrc / 64;
 	n = isrc % 64;
-	dd->gi_mask[m] &= ~((u64)1 << n);
+	if (likely(m < CCE_NUM_INT_CSRS)) {
+		dd->gi_mask[m] &= ~((u64)1 << n);
+	} else {
+		dd_dev_err(dd, "remap interrupt err\n");
+		return;
+	}
 
 	/* direct the chip source to the given MSI-X interrupt */
 	m = isrc / 8;
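Review note: the hunk above guards the computed index into dd->gi_mask[] before the read-modify-write instead of trusting isrc to be in range. A minimal userspace sketch of the same bounds-check pattern, with invented names and an assumed array size standing in for CCE_NUM_INT_CSRS:

#include <stdio.h>
#include <stdint.h>

#define NUM_MASK_WORDS 13	/* stand-in for CCE_NUM_INT_CSRS; value assumed */

static uint64_t gi_mask[NUM_MASK_WORDS];

static int clear_source_bit(int isrc)
{
	int m = isrc / 64;
	int n = isrc % 64;

	if (m >= NUM_MASK_WORDS) {	/* reject out-of-range sources */
		fprintf(stderr, "remap interrupt err\n");
		return -1;
	}
	gi_mask[m] &= ~((uint64_t)1 << n);
	return 0;
}

int main(void)
{
	/* first call is in range, second exercises the error path */
	return clear_source_bit(63) || clear_source_bit(64 * NUM_MASK_WORDS);
}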
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index 650305cc0373..1a7af9f60c13 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter)
 		   qp->pid);
 }
 
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp)
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct hfi1_qp_priv *priv;
 
-	priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node);
+	priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 
 	priv->owner = qp;
 
-	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp,
+	priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL,
 				   rdi->dparms.node);
 	if (!priv->s_ahg) {
 		kfree(priv);
diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h
index 1eb9cd7b8c19..6fe542b6a927 100644
--- a/drivers/infiniband/hw/hfi1/qp.h
+++ b/drivers/infiniband/hw/hfi1/qp.h
@@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp);
 /*
  * Functions provided by hfi1 driver for rdmavt to use
  */
-void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp,
-		    gfp_t gfp);
+void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 unsigned free_all_qps(struct rvt_dev_info *rdi);
 void notify_qp_reset(struct rvt_qp *qp);
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index 37d5d29597a4..23fad6d96944 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		switch (wr->opcode) {
 		case IB_WR_RDMA_READ:
 			ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ;
-			set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-				      atomic_wr(wr)->rkey);
+			set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+				      rdma_wr(wr)->rkey);
 			break;
 		case IB_WR_RDMA_WRITE:
 		case IB_WR_RDMA_WRITE_WITH_IMM:
 			ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE;
-			set_raddr_seg(wqe, atomic_wr(wr)->remote_addr,
-				      atomic_wr(wr)->rkey);
+			set_raddr_seg(wqe, rdma_wr(wr)->remote_addr,
+				      rdma_wr(wr)->rkey);
 			break;
 		case IB_WR_SEND:
 		case IB_WR_SEND_WITH_INV:
@@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	union ib_gid dgid;
 	u64 subnet_prefix;
 	int attr_mask = 0;
-	int i;
+	int i, j;
 	int ret;
+	u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 };
 	u8 phy_port;
+	u8 port = 0;
 	u8 sl;
 
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
@@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 	attr.rnr_retry = 7;
 	attr.timeout = 0x12;
 	attr.path_mtu = IB_MTU_256;
+	attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
 	rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0);
 	rdma_ah_set_static_rate(&attr.ah_attr, 3);
 
 	subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
+		phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
+				(i % HNS_ROCE_MAX_PORTS);
+		sl = i / HNS_ROCE_MAX_PORTS;
+
+		for (j = 0; j < caps->num_ports; j++) {
+			if (hr_dev->iboe.phy_port[j] == phy_port) {
+				queue_en[i] = 1;
+				port = j;
+				break;
+			}
+		}
+
+		if (!queue_en[i])
+			continue;
+
 		free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd);
 		if (IS_ERR(free_mr->mr_free_qp[i])) {
 			dev_err(dev, "Create loop qp failed!\n");
@@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		}
 		hr_qp = free_mr->mr_free_qp[i];
 
-		sl = i / caps->num_ports;
-
-		if (caps->num_ports == HNS_ROCE_MAX_PORTS)
-			phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) :
-				(i % caps->num_ports);
-		else
-			phy_port = i % caps->num_ports;
-
-		hr_qp->port = phy_port + 1;
+		hr_qp->port = port;
 		hr_qp->phy_port = phy_port;
 		hr_qp->ibqp.qp_type = IB_QPT_RC;
 		hr_qp->ibqp.device = &hr_dev->ib_dev;
@@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev)
 		hr_qp->ibqp.recv_cq = cq;
 		hr_qp->ibqp.send_cq = cq;
 
-		rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1);
-		rdma_ah_set_sl(&attr.ah_attr, phy_port + 1);
-		attr.port_num = phy_port + 1;
+		rdma_ah_set_port_num(&attr.ah_attr, port + 1);
+		rdma_ah_set_sl(&attr.ah_attr, sl);
+		attr.port_num = port + 1;
 
 		attr.dest_qp_num = hr_qp->qpn;
 		memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr),
-		       hr_dev->dev_addr[phy_port],
+		       hr_dev->dev_addr[port],
 		       MAC_ADDR_OCTET_NUM);
 
 		memcpy(&dgid.raw, &subnet_prefix, sizeof(u64));
-		memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3);
-		memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3);
+		memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3);
+		memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3);
 		dgid.raw[11] = 0xff;
 		dgid.raw[12] = 0xfe;
 		dgid.raw[8] ^= 2;
 		rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw);
-		attr_mask |= IB_QP_PORT;
 
 		ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask,
 					    IB_QPS_RESET, IB_QPS_INIT);
@@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+
 		ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp);
 		if (ret)
 			dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n",
@@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies;
 	int i;
 	int ret;
-	int ne;
+	int ne = 0;
 
 	mr_work = container_of(work, struct hns_roce_mr_free_work, work);
 	hr_mr = (struct hns_roce_mr *)mr_work->mr;
@@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 
 	for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) {
 		hr_qp = free_mr->mr_free_qp[i];
+		if (!hr_qp)
+			continue;
+		ne++;
+
 		ret = hns_roce_v1_send_lp_wqe(hr_qp);
 		if (ret) {
 			dev_err(dev,
@@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 		}
 	}
 
-	ne = HNS_ROCE_V1_RESV_QP;
 	do {
 		ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc);
 		if (ret < 0) {
@@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work)
 			goto free_work;
 		}
 		ne -= ret;
-		msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE);
+		usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000,
+			     (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000);
 	} while (ne && time_before_eq(jiffies, end));
 
 	if (ne != 0)
@@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
 		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
-	} else {
+	} else {
 		/* RQ conrespond to CQE */
 		wc->byte_len = le32_to_cpu(cqe->byte_cnt);
 		opcode = roce_get_field(cqe->cqe_byte_4,
@@ -3533,10 +3549,12 @@ static int check_qp_db_process_status(struct hns_roce_dev *hr_dev,
 			old_cnt = roce_get_field(old_send,
 						 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 						 ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S);
-			if (cur_cnt - old_cnt > SDB_ST_CMP_VAL)
-				success_flags = 1;
-			else {
-				send_ptr = roce_get_field(old_send,
+			if (cur_cnt - old_cnt >
+					SDB_ST_CMP_VAL) {
+				success_flags = 1;
+			} else {
+				send_ptr =
+					roce_get_field(old_send,
 					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M,
 					ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) +
 					roce_get_field(sdb_retry_cnt,
@@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	struct hns_roce_dev *hr_dev;
 	struct hns_roce_qp *hr_qp;
 	struct device *dev;
+	unsigned long qpn;
 	int ret;
 
 	qp_work_entry = container_of(work, struct hns_roce_qp_work, work);
@@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	dev = &hr_dev->pdev->dev;
 	priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv;
 	hr_qp = qp_work_entry->qp;
+	qpn = hr_qp->qpn;
 
-	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn);
 
 	qp_work_entry->sche_cnt++;
 
@@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 				      &qp_work_entry->db_wait_stage);
 	if (ret) {
 		dev_err(dev, "Check QP(0x%lx) db process status failed!\n",
-			hr_qp->qpn);
+			qpn);
 		return;
 	}
 
@@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 	ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state,
 				    IB_QPS_RESET);
 	if (ret) {
-		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn);
+		dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn);
 		return;
 	}
 
@@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work)
 
 	if (hr_qp->ibqp.qp_type == IB_QPT_RC) {
 		/* RC QP, release QPN */
-		hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1);
+		hns_roce_release_range_qp(hr_dev, qpn, 1);
 		kfree(hr_qp);
 	} else
 		kfree(hr_to_hr_sqp(hr_qp));
 
 	kfree(qp_work_entry);
 
-	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn);
+	dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn);
 }
 
 int hns_roce_v1_destroy_qp(struct ib_qp *ibqp)
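Review note on the rsv_lp_qp hunks above: the port/SL derivation now happens before QP creation, and queue_en[] records which reserved slots actually matched an enabled port, so the release and free-MR paths can skip slots that were never populated. A hedged, self-contained sketch of just the selection step; the iboe.phy_port[] lookup is reduced to a plain array, and HNS_ROCE_MAX_PORTS is assumed to be 6 as in the driver of this era:

#include <stdint.h>

#define HNS_ROCE_MAX_PORTS	6	/* assumed value */

/* returns 1 and sets *port_out when reserved slot i maps to an enabled port */
static int pick_port(const uint8_t *phy_port_map, int num_ports, int i,
		     uint8_t *port_out)
{
	uint8_t phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2)
						     : (i % HNS_ROCE_MAX_PORTS);
	int j;

	for (j = 0; j < num_ports; j++) {
		if (phy_port_map[j] == phy_port) {
			*port_out = (uint8_t)j;
			return 1;
		}
	}
	return 0;	/* no enabled port uses this phy port: skip the QP */
}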
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index c3b41f95e70a..d9777b662eba 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		return -ENODEV;
 	}
 
-	spin_lock_bh(&hr_dev->iboe.lock);
-
 	switch (event) {
 	case NETDEV_UP:
 	case NETDEV_CHANGE:
@@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port,
 		break;
 	}
 
-	spin_unlock_bh(&hr_dev->iboe.lock);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 4f5a143fc0a7..ff931c580557 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	int err;
 
 	err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
-			     PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+			     PAGE_SIZE * 2, &buf->buf);
 
 	if (err)
 		goto out;
@@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *
 	if (err)
 		goto err_buf;
 
-	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+	err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
 	if (err)
 		goto err_mtt;
 
@@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev,
 
 		uar = &to_mucontext(context)->uar;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &cq->db, 1);
 		if (err)
 			goto err_cq;
 
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75b2f7d4cd95..d1b43cbbfea7 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
 				 * call to mlx4_ib_vma_close.
 				 */
 				put_task_struct(owning_process);
-				msleep(1);
+				usleep_range(1000, 2000);
 				owning_process = get_pid_task(ibcontext->tgid,
 							      PIDTYPE_PID);
 				if (!owning_process ||
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 3405e947dc1e..b73f89700ef9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy
 		if (!count)
 			break;
 
-		msleep(1);
+		usleep_range(1000, 2000);
 	} while (time_after(end, jiffies));
 
 	flush_workqueue(ctx->mcg_wq);
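Review note: this diff converts several msleep(1) calls (here, in main.c above, and in the hns and nes files) to usleep_range(1000, 2000). The rationale, per the kernel's timer documentation, is that msleep() of a few milliseconds can oversleep by a whole jiffy, up to roughly 20 ms at HZ=100, while usleep_range() is hrtimer-backed and keeps the wait inside a bounded window. A one-function sketch of the converted call:

#include <linux/delay.h>

/* msleep(1) may sleep a full jiffy; give a bounded 1-2 ms window instead */
static void short_settle_delay(void)
{
	usleep_range(1000, 2000);
}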
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c2b9cbf4da05..9db82e67e959 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags {
 	MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
 	MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
 	MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
-	MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
 
 	/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
 	MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 996e9058e515..75c0e6c5dd56 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev,
 
 static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			    struct ib_qp_init_attr *init_attr,
-			    struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
-			    gfp_t gfp)
+			    struct ib_udata *udata, int sqpn,
+			    struct mlx4_ib_qp **caller_qp)
 {
 	int qpn;
 	int err;
@@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
 	    (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
 			MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
-		sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+		sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
 		if (!sqp)
 			return -ENOMEM;
 		qp = &sqp->qp;
 		qp->pri.vid = 0xFFFF;
 		qp->alt.vid = 0xFFFF;
 	} else {
-		qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+		qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
 		if (!qp)
 			return -ENOMEM;
 		qp->pri.vid = 0xFFFF;
@@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 			goto err;
 
 		if (qp_has_rq(init_attr)) {
-			err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+			err = mlx4_db_alloc(dev->dev, &qp->db, 0);
 			if (err)
 				goto err;
 
@@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		}
 
 		if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
-				   &qp->buf, gfp)) {
+				   &qp->buf)) {
 			memcpy(&init_attr->cap, &backup_cap,
 			       sizeof(backup_cap));
 			err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
@@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 				goto err_db;
 
 			if (mlx4_buf_alloc(dev->dev, qp->buf_size,
-					   PAGE_SIZE * 2, &qp->buf, gfp)) {
+					   PAGE_SIZE * 2, &qp->buf)) {
 				err = -ENOMEM;
 				goto err_db;
 			}
@@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+		err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
 		if (err)
 			goto err_mtt;
 
 		qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
-					    gfp | __GFP_NOWARN);
+					    GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->sq.wrid)
 			qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
-					    gfp | __GFP_NOWARN);
+					    GFP_KERNEL | __GFP_NOWARN);
 		if (!qp->rq.wrid)
 			qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
-						gfp, PAGE_KERNEL);
+						GFP_KERNEL, PAGE_KERNEL);
 		if (!qp->sq.wrid || !qp->rq.wrid) {
 			err = -ENOMEM;
 			goto err_wrid;
@@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
 		qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 
-	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+	err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
 	if (err)
 		goto err_qpn;
 
@@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	int err;
 	int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
 	u16 xrcdn = 0;
-	gfp_t gfp;
 
-	gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
-		GFP_NOIO : GFP_KERNEL;
 	/*
 	 * We only support LSO, vendor flag1, and multicast loopback blocking,
 	 * and only for kernel UD QPs.
@@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 					 MLX4_IB_SRIOV_TUNNEL_QP |
 					 MLX4_IB_SRIOV_SQP |
 					 MLX4_IB_QP_NETIF |
-					 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
-					 MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+					 MLX4_IB_QP_CREATE_ROCE_V2_GSI))
 		return ERR_PTR(-EINVAL);
 
 	if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
@@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 
 	if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
-					 MLX4_IB_QP_CREATE_USE_GFP_NOIO |
 					 MLX4_IB_QP_CREATE_ROCE_V2_GSI |
 					 MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
 	     init_attr->qp_type != IB_QPT_UD) ||
@@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_RC:
 	case IB_QPT_UC:
 	case IB_QPT_RAW_PACKET:
-		qp = kzalloc(sizeof *qp, gfp);
+		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 		if (!qp)
 			return ERR_PTR(-ENOMEM);
 		qp->pri.vid = 0xFFFF;
@@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 	case IB_QPT_UD:
 	{
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr,
-				       udata, 0, &qp, gfp);
+				       udata, 0, &qp);
 		if (err) {
 			kfree(qp);
 			return ERR_PTR(err);
@@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd,
 		}
 
 		err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
-				       sqpn,
-				       &qp, gfp);
+				       sqpn, &qp);
 		if (err)
 			return ERR_PTR(err);
 
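Review note: the gfp plumbing stripped from create_qp_common() and its callers existed only to service MLX4_IB_QP_CREATE_USE_GFP_NOIO. This diff does not show the replacement, but a caller that must avoid I/O recursion during QP creation would presumably rely on the memalloc_noio scope API instead of threading gfp_t through every allocation; a hedged sketch, not taken from this patch:

#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>

static struct ib_qp *create_qp_noio(struct ib_pd *pd,
				    struct ib_qp_init_attr *attr)
{
	unsigned int noio_flag;
	struct ib_qp *qp;

	/* within this scope, GFP_KERNEL allocations behave like GFP_NOIO */
	noio_flag = memalloc_noio_save();
	qp = ib_create_qp(pd, attr);
	memalloc_noio_restore(noio_flag);

	return qp;
}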
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c
index e32dd58937a8..0facaf5f6d23 100644
--- a/drivers/infiniband/hw/mlx4/srq.c
+++ b/drivers/infiniband/hw/mlx4/srq.c
@@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_mtt;
 	} else {
-		err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+		err = mlx4_db_alloc(dev->dev, &srq->db, 0);
 		if (err)
 			goto err_srq;
 
 		*srq->db.db = 0;
 
-		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
-				   GFP_KERNEL)) {
+		if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+				   &srq->buf)) {
 			err = -ENOMEM;
 			goto err_db;
 		}
@@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd,
 		if (err)
 			goto err_buf;
 
-		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+		err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
 		if (err)
 			goto err_mtt;
 
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 763bb5b36144..8ab2f1360a45 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -582,6 +582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 	}
 }
 
+static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
+{
+	if (!mlx5_debugfs_root)
+		return;
+
+	debugfs_remove_recursive(dev->cache.root);
+	dev->cache.root = NULL;
+}
+
 static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 {
 	struct mlx5_mr_cache *cache = &dev->cache;
@@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 		sprintf(ent->name, "%d", ent->order);
 		ent->dir = debugfs_create_dir(ent->name, cache->root);
 		if (!ent->dir)
-			return -ENOMEM;
+			goto err;
 
 		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
 						 &size_fops);
 		if (!ent->fsize)
-			return -ENOMEM;
+			goto err;
 
 		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
 						  &limit_fops);
 		if (!ent->flimit)
-			return -ENOMEM;
+			goto err;
 
 		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
 					       &ent->cur);
 		if (!ent->fcur)
-			return -ENOMEM;
+			goto err;
 
 		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
 						&ent->miss);
 		if (!ent->fmiss)
-			return -ENOMEM;
+			goto err;
 	}
 
 	return 0;
-}
-
-static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
-{
-	if (!mlx5_debugfs_root)
-		return;
-
-	debugfs_remove_recursive(dev->cache.root);
-}
+err:
+	mlx5_mr_cache_debugfs_cleanup(dev);
+
+	return -ENOMEM;
+}
 
 static void delay_time_func(unsigned long ctx)
@@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 	if (err)
 		mlx5_ib_warn(dev, "cache debugfs failure\n");
 
+	/*
+	 * We don't want to fail driver if debugfs failed to initialize,
+	 * so we are not forwarding error to the user.
+	 */
+
 	return 0;
 }
 
@@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
 	mr->ndescs = sg_nents;
 
 	for_each_sg(sgl, sg, sg_nents, i) {
-		if (unlikely(i > mr->max_descs))
+		if (unlikely(i >= mr->max_descs))
 			break;
 		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
 		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
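Review note: two independent fixes sit in the mr.c hunks above. Debugfs setup now unwinds through mlx5_mr_cache_debugfs_cleanup() on partial failure rather than returning with already-created entries leaked, and the KLM loop guard becomes i >= mr->max_descs, since max_descs counts slots and the last valid zero-based index is max_descs - 1. A userspace sketch of the corrected bound, with invented names:

#include <stddef.h>

/* copies at most max_descs entries; the last valid index is max_descs - 1 */
static size_t fill_descs(int *descs, size_t max_descs,
			 const int *src, size_t nents)
{
	size_t i;

	for (i = 0; i < nents; i++) {
		if (i >= max_descs)	/* `>` here would write one past the end */
			break;
		descs[i] = src[i];
	}
	return i;	/* number of descriptors actually written */
}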
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8f9d8b4ad583..b0adf65e4bdb 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 			if ((0x0F000100 == (pcs_control_status0 & 0x0F000100))
 			    || (0x0F000100 == (pcs_control_status1 & 0x0F000100)))
 				int_cnt++;
-			msleep(1);
+			usleep_range(1000, 2000);
 		}
 		if (int_cnt > 1) {
 			spin_lock_irqsave(&nesadapter->phy_lock, flags);
@@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) {
 						break;
 					}
 				}
-				msleep(1);
+				usleep_range(1000, 2000);
 			}
 		}
 	}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 5984981e7dd4..a343e3b5d4cb 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = {
 
 };
 
-static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
-			 gfp_t gfp)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
 {
-	unsigned long page = get_zeroed_page(gfp);
+	unsigned long page = get_zeroed_page(GFP_KERNEL);
 
 	/*
 	 * Free the page if someone raced with us installing it.
@@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map,
  * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
  */
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp)
+		  enum ib_qp_type type, u8 port)
 {
 	u32 i, offset, max_scan, qpn;
 	struct rvt_qpn_map *map;
@@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
 	max_scan = qpt->nmaps - !offset;
 	for (i = 0;;) {
 		if (unlikely(!map->page)) {
-			get_map_page(qpt, map, gfp);
+			get_map_page(qpt, map);
 			if (unlikely(!map->page))
 				break;
 		}
@@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu)
 	return ib_mtu_enum_to_int(pmtu);
 }
 
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp)
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp)
 {
 	struct qib_qp_priv *priv;
 
-	priv = kzalloc(sizeof(*priv), gfp);
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 	if (!priv)
 		return ERR_PTR(-ENOMEM);
 	priv->owner = qp;
 
-	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp);
+	priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL);
 	if (!priv->s_hdr) {
 		kfree(priv);
 		return ERR_PTR(-ENOMEM);
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index da0db5485ddc..a52fc67b40d7 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd,
  * Functions provided by qib driver for rdmavt to use
  */
 unsigned qib_free_all_qps(struct rvt_dev_info *rdi);
-void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp);
+void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp);
 void qib_notify_qp_reset(struct rvt_qp *qp);
 int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt,
-		  enum ib_qp_type type, u8 port, gfp_t gfp);
+		  enum ib_qp_type type, u8 port);
 void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait);
 #ifdef CONFIG_DEBUG_FS
 
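Review note: the qib changes mirror the hfi1 and mlx4 ones; once every call site passes the same allocation context, the gfp_t pass-through is dropped and GFP_KERNEL is fixed inside the helpers. The shape of that refactoring as a self-contained userspace sketch, with invented names:

#include <stdlib.h>

struct qp_priv {
	void *owner;
};

/* before: void *priv_alloc(void *owner, int flags) -- every caller passed
 * the same flags, so the parameter is dropped and the choice moves inside */
static void *priv_alloc(void *owner)
{
	struct qp_priv *priv = calloc(1, sizeof(*priv));

	if (priv)
		priv->owner = owner;
	return priv;
}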