author     Naresh Gottumukkala <bgottumukkala@emulex.com>    2013-06-10 00:42:39 -0400
committer  Roland Dreier <roland@purestorage.com>            2013-06-20 07:52:14 -0400
commit     ef99c4c2ed63cb0deb94ea70fb47c2d6294e302e (patch)
tree       487ba67e32d6565dfcc2b9d264e3183abf3b854c
parent     b1d58b99194a121a44ec77571f84f62a6ccd6431 (diff)
RDMA/ocrdma: Replace ocrdma_err with pr_err
Remove private macro ocrdma_err and replace with standard pr_err.
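For context, a minimal sketch of the before/after pattern (not part of the patch itself): pr_err() comes from <linux/printk.h> and expands to printk(KERN_ERR pr_fmt(fmt), ...); the pr_fmt() prefix mentioned in the comment is only an illustration of what the standard helper additionally enables.

    /* Private wrapper removed from ocrdma.h by this patch: */
    #define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)

    /* Standard kernel helper used instead; a call site from this patch: */
    pr_err("%s() unknown type=0x%x\n", __func__, type);

    /*
     * Unlike the private macro, pr_err() honors pr_fmt(), so a driver could
     * prefix every message uniformly by defining, before any #include:
     *
     *     #define pr_fmt(fmt) "ocrdma: " fmt
     *
     * This patch does not add such a prefix; the messages are unchanged.
     */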
Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma.h       |  2
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_hw.c    | 44
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_main.c  |  6
-rw-r--r--  drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 92
4 files changed, 70 insertions, 74 deletions
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 21d99f6fb367..9d82d097e25a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -42,8 +42,6 @@
 #define OCRDMA_ROCE_DEV_VERSION "1.0.0"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 
-#define ocrdma_err(format, arg...) printk(KERN_ERR format, ##arg)
-
 #define OCRDMA_MAX_AH 512
 
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index f671d5d9ce3a..76c9e192ddcc 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -731,7 +731,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
 		qp_event = 0;
 		srq_event = 0;
 		dev_event = 0;
-		ocrdma_err("%s() unknown type=0x%x\n", __func__, type);
+		pr_err("%s() unknown type=0x%x\n", __func__, type);
 		break;
 	}
 
@@ -761,8 +761,8 @@ static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
 	if (evt_code == OCRDMA_ASYNC_EVE_CODE)
 		ocrdma_dispatch_ibevent(dev, cqe);
 	else
-		ocrdma_err("%s(%d) invalid evt code=0x%x\n",
-			   __func__, dev->id, evt_code);
+		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
+		       dev->id, evt_code);
 }
 
 static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
@@ -776,8 +776,8 @@ static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
 		dev->mqe_ctx.cmd_done = true;
 		wake_up(&dev->mqe_ctx.cmd_wait);
 	} else
-		ocrdma_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
-			   __func__, cqe->tag_lo, dev->mqe_ctx.tag);
+		pr_err("%s() cqe for invalid tag0x%x.expected=0x%x\n",
+		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
 }
 
 static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
@@ -796,7 +796,7 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
 		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
 			ocrdma_process_mcqe(dev, cqe);
 		else
-			ocrdma_err("%s() cqe->compl is not set.\n", __func__);
+			pr_err("%s() cqe->compl is not set.\n", __func__);
 		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
 		ocrdma_mcq_inc_tail(dev);
 	}
@@ -855,7 +855,7 @@ static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
 
 	cq = dev->cq_tbl[cq_idx];
 	if (cq == NULL) {
-		ocrdma_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
+		pr_err("%s%d invalid id=0x%x\n", __func__, dev->id, cq_idx);
 		return;
 	}
 	spin_lock_irqsave(&cq->cq_lock, flags);
@@ -957,7 +957,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
 	rsp = ocrdma_get_mqe_rsp(dev);
 	ocrdma_copy_le32_to_cpu(mqe, rsp, (sizeof(*mqe)));
 	if (cqe_status || ext_status) {
-		ocrdma_err
+		pr_err
 		    ("%s() opcode=0x%x, cqe_status=0x%x, ext_status=0x%x\n",
 		     __func__,
 		     (rsp->u.rsp.subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
@@ -1339,8 +1339,8 @@ int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
 	if (dpp_cq)
 		return -EINVAL;
 	if (entries > dev->attr.max_cqe) {
-		ocrdma_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
-			   __func__, dev->id, dev->attr.max_cqe, entries);
+		pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
+		       __func__, dev->id, dev->attr.max_cqe, entries);
 		return -EINVAL;
 	}
 	if (dpp_cq && (dev->nic_info.dev_family != OCRDMA_GEN2_FAMILY))
@@ -1607,7 +1607,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev,
 		status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
 					   cur_pbl_cnt, hwmr->pbe_size, last);
 		if (status) {
-			ocrdma_err("%s() status=%d\n", __func__, status);
+			pr_err("%s() status=%d\n", __func__, status);
 			return status;
 		}
 		/* if there is no more pbls to register then exit. */
@@ -1630,7 +1630,7 @@ int ocrdma_reg_mr(struct ocrdma_dev *dev,
 			break;
 	}
 	if (status)
-		ocrdma_err("%s() err. status=%d\n", __func__, status);
+		pr_err("%s() err. status=%d\n", __func__, status);
 
 	return status;
 }
@@ -1827,8 +1827,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
 	status = ocrdma_build_q_conf(&max_wqe_allocated,
 		dev->attr.wqe_size, &hw_pages, &hw_page_size);
 	if (status) {
-		ocrdma_err("%s() req. max_send_wr=0x%x\n", __func__,
-			   max_wqe_allocated);
+		pr_err("%s() req. max_send_wr=0x%x\n", __func__,
+		       max_wqe_allocated);
 		return -EINVAL;
 	}
 	qp->sq.max_cnt = max_wqe_allocated;
@@ -1877,8 +1877,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
 	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
 				     &hw_pages, &hw_page_size);
 	if (status) {
-		ocrdma_err("%s() req. max_recv_wr=0x%x\n", __func__,
-			   attrs->cap.max_recv_wr + 1);
+		pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
+		       attrs->cap.max_recv_wr + 1);
 		return status;
 	}
 	qp->rq.max_cnt = max_rqe_allocated;
@@ -2073,10 +2073,10 @@ mbx_err:
 	if (qp->rq.va)
 		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
 rq_err:
-	ocrdma_err("%s(%d) rq_err\n", __func__, dev->id);
+	pr_err("%s(%d) rq_err\n", __func__, dev->id);
 	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
 sq_err:
-	ocrdma_err("%s(%d) sq_err\n", __func__, dev->id);
+	pr_err("%s(%d) sq_err\n", __func__, dev->id);
 	kfree(cmd);
 	return status;
 }
@@ -2113,7 +2113,7 @@ int ocrdma_resolve_dgid(struct ocrdma_dev *dev, union ib_gid *dgid,
 	else if (rdma_link_local_addr(&in6))
 		rdma_get_ll_mac(&in6, mac_addr);
 	else {
-		ocrdma_err("%s() fail to resolve mac_addr.\n", __func__);
+		pr_err("%s() fail to resolve mac_addr.\n", __func__);
 		return -EINVAL;
 	}
 	return 0;
@@ -2348,8 +2348,8 @@ int ocrdma_mbx_create_srq(struct ocrdma_srq *srq,
 				      dev->attr.rqe_size,
 				      &hw_pages, &hw_page_size);
 	if (status) {
-		ocrdma_err("%s() req. max_wr=0x%x\n", __func__,
-			   srq_attr->attr.max_wr);
+		pr_err("%s() req. max_wr=0x%x\n", __func__,
+		       srq_attr->attr.max_wr);
 		status = -EINVAL;
 		goto ret;
 	}
@@ -2600,7 +2600,7 @@ mq_err:
 	ocrdma_destroy_qp_eqs(dev);
 qpeq_err:
 	ocrdma_destroy_eq(dev, &dev->meq);
-	ocrdma_err("%s() status=%d\n", __func__, status);
+	pr_err("%s() status=%d\n", __func__, status);
 	return status;
 }
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 48928c8e7774..ded416f1adea 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -378,7 +378,7 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
 	spin_lock_init(&dev->flush_q_lock);
 	return 0;
 alloc_err:
-	ocrdma_err("%s(%d) error.\n", __func__, dev->id);
+	pr_err("%s(%d) error.\n", __func__, dev->id);
 	return -ENOMEM;
 }
 
@@ -396,7 +396,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
 
 	dev = (struct ocrdma_dev *)ib_alloc_device(sizeof(struct ocrdma_dev));
 	if (!dev) {
-		ocrdma_err("Unable to allocate ib device\n");
+		pr_err("Unable to allocate ib device\n");
 		return NULL;
 	}
 	dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
@@ -437,7 +437,7 @@ init_err:
 idr_err:
 	kfree(dev->mbx_cmd);
 	ib_dealloc_device(&dev->ibdev);
-	ocrdma_err("%s() leaving. ret=%d\n", __func__, status);
+	pr_err("%s() leaving. ret=%d\n", __func__, status);
 	return NULL;
 }
 
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 38c145b28f5c..882a8198d820 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -114,8 +114,8 @@ int ocrdma_query_port(struct ib_device *ibdev,
 
 	dev = get_ocrdma_dev(ibdev);
 	if (port > 1) {
-		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
-			   dev->id, port);
+		pr_err("%s(%d) invalid_port=0x%x\n", __func__,
+		       dev->id, port);
 		return -EINVAL;
 	}
 	netdev = dev->nic_info.netdev;
@@ -155,8 +155,7 @@ int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
 
 	dev = get_ocrdma_dev(ibdev);
 	if (port > 1) {
-		ocrdma_err("%s(%d) invalid_port=0x%x\n", __func__,
-			   dev->id, port);
+		pr_err("%s(%d) invalid_port=0x%x\n", __func__, dev->id, port);
 		return -EINVAL;
 	}
 	return 0;
@@ -442,8 +441,8 @@ static struct ocrdma_mr *ocrdma_alloc_lkey(struct ib_pd *ibpd,
 	struct ocrdma_dev *dev = pd->dev;
 
 	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
-		ocrdma_err("%s(%d) leaving err, invalid access rights\n",
-			   __func__, dev->id);
+		pr_err("%s(%d) leaving err, invalid access rights\n",
+		       __func__, dev->id);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -703,8 +702,8 @@ static int ocrdma_copy_cq_uresp(struct ocrdma_cq *cq, struct ib_udata *udata,
 	uresp.phase_change = cq->phase_change ? 1 : 0;
 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (status) {
-		ocrdma_err("%s(%d) copy error cqid=0x%x.\n",
-			   __func__, cq->dev->id, cq->id);
+		pr_err("%s(%d) copy error cqid=0x%x.\n",
+		       __func__, cq->dev->id, cq->id);
 		goto err;
 	}
 	uctx = get_ocrdma_ucontext(ib_ctx);
@@ -822,57 +821,56 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
 	if (attrs->qp_type != IB_QPT_GSI &&
 	    attrs->qp_type != IB_QPT_RC &&
 	    attrs->qp_type != IB_QPT_UD) {
-		ocrdma_err("%s(%d) unsupported qp type=0x%x requested\n",
-			   __func__, dev->id, attrs->qp_type);
+		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
+		       __func__, dev->id, attrs->qp_type);
 		return -EINVAL;
 	}
 	if (attrs->cap.max_send_wr > dev->attr.max_wqe) {
-		ocrdma_err("%s(%d) unsupported send_wr=0x%x requested\n",
-			   __func__, dev->id, attrs->cap.max_send_wr);
-		ocrdma_err("%s(%d) supported send_wr=0x%x\n",
-			   __func__, dev->id, dev->attr.max_wqe);
+		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
+		       __func__, dev->id, attrs->cap.max_send_wr);
+		pr_err("%s(%d) supported send_wr=0x%x\n",
+		       __func__, dev->id, dev->attr.max_wqe);
 		return -EINVAL;
 	}
 	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
-		ocrdma_err("%s(%d) unsupported recv_wr=0x%x requested\n",
-			   __func__, dev->id, attrs->cap.max_recv_wr);
-		ocrdma_err("%s(%d) supported recv_wr=0x%x\n",
-			   __func__, dev->id, dev->attr.max_rqe);
+		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
+		       __func__, dev->id, attrs->cap.max_recv_wr);
+		pr_err("%s(%d) supported recv_wr=0x%x\n",
+		       __func__, dev->id, dev->attr.max_rqe);
 		return -EINVAL;
 	}
 	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
-		ocrdma_err("%s(%d) unsupported inline data size=0x%x"
-			   " requested\n", __func__, dev->id,
-			   attrs->cap.max_inline_data);
-		ocrdma_err("%s(%d) supported inline data size=0x%x\n",
-			   __func__, dev->id, dev->attr.max_inline_data);
+		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
+		       __func__, dev->id, attrs->cap.max_inline_data);
+		pr_err("%s(%d) supported inline data size=0x%x\n",
+		       __func__, dev->id, dev->attr.max_inline_data);
 		return -EINVAL;
 	}
 	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
-		ocrdma_err("%s(%d) unsupported send_sge=0x%x requested\n",
-			   __func__, dev->id, attrs->cap.max_send_sge);
-		ocrdma_err("%s(%d) supported send_sge=0x%x\n",
-			   __func__, dev->id, dev->attr.max_send_sge);
+		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
+		       __func__, dev->id, attrs->cap.max_send_sge);
+		pr_err("%s(%d) supported send_sge=0x%x\n",
+		       __func__, dev->id, dev->attr.max_send_sge);
 		return -EINVAL;
 	}
 	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
-		ocrdma_err("%s(%d) unsupported recv_sge=0x%x requested\n",
-			   __func__, dev->id, attrs->cap.max_recv_sge);
-		ocrdma_err("%s(%d) supported recv_sge=0x%x\n",
-			   __func__, dev->id, dev->attr.max_recv_sge);
+		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
+		       __func__, dev->id, attrs->cap.max_recv_sge);
+		pr_err("%s(%d) supported recv_sge=0x%x\n",
+		       __func__, dev->id, dev->attr.max_recv_sge);
 		return -EINVAL;
 	}
 	/* unprivileged user space cannot create special QP */
 	if (ibpd->uobject && attrs->qp_type == IB_QPT_GSI) {
-		ocrdma_err
+		pr_err
 		    ("%s(%d) Userspace can't create special QPs of type=0x%x\n",
 		     __func__, dev->id, attrs->qp_type);
 		return -EINVAL;
 	}
 	/* allow creating only one GSI type of QP */
 	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
-		ocrdma_err("%s(%d) GSI special QPs already created.\n",
-			   __func__, dev->id);
+		pr_err("%s(%d) GSI special QPs already created.\n",
+		       __func__, dev->id);
 		return -EINVAL;
 	}
 	/* verify consumer QPs are not trying to use GSI QP's CQ */
@@ -881,8 +879,8 @@ static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
 		    (dev->gsi_sqcq == get_ocrdma_cq(attrs->recv_cq)) ||
 		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->send_cq)) ||
 		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
-			ocrdma_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
-				   __func__, dev->id);
+			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
+			       __func__, dev->id);
 			return -EINVAL;
 		}
 	}
@@ -934,7 +932,7 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
 	}
 	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
 	if (status) {
-		ocrdma_err("%s(%d) user copy error.\n", __func__, dev->id);
+		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
 		goto err;
 	}
 	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
@@ -1088,7 +1086,7 @@ mbx_err:
 	kfree(qp->wqe_wr_id_tbl);
 	kfree(qp->rqe_wr_id_tbl);
 	kfree(qp);
-	ocrdma_err("%s(%d) error=%d\n", __func__, dev->id, status);
+	pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
 gen_err:
 	return ERR_PTR(status);
 }
@@ -1138,10 +1136,10 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	spin_unlock_irqrestore(&qp->q_lock, flags);
 
 	if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
-		ocrdma_err("%s(%d) invalid attribute mask=0x%x specified for "
-			   "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
-			   __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
-			   old_qps, new_qps);
+		pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
+		       "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
+		       __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
+		       old_qps, new_qps);
 		goto param_err;
 	}
 
@@ -1640,9 +1638,9 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
 {
 	if (wr->send_flags & IB_SEND_INLINE) {
 		if (wr->sg_list[0].length > qp->max_inline_data) {
-			ocrdma_err("%s() supported_len=0x%x,"
-				   " unspported len req=0x%x\n", __func__,
-				   qp->max_inline_data, wr->sg_list[0].length);
+			pr_err("%s() supported_len=0x%x,\n"
+			       " unspported len req=0x%x\n", __func__,
+			       qp->max_inline_data, wr->sg_list[0].length);
 			return -EINVAL;
 		}
 		memcpy(sge,
@@ -2057,8 +2055,8 @@ static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
 		break;
 	default:
 		ibwc->status = IB_WC_GENERAL_ERR;
-		ocrdma_err("%s() invalid opcode received = 0x%x\n",
-			   __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
+		pr_err("%s() invalid opcode received = 0x%x\n",
+		       __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
 		break;
 	};
 }