Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
 drivers/infiniband/hw/ehca/ehca_qp.c | 112 ++++++++++++++++++++++++++++----------------------------
 1 file changed, 58 insertions(+), 54 deletions(-)
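In short: this patch threads an is_user flag through QP creation, modification, and destruction so that the software queue maps, the bookkeeping ehca uses to generate flush CQEs, are allocated, reset, and freed only for kernel-owned QPs; the hypervisor call that allocates the QP resource is told about ownership as well. Independently, it drops EHCA_BMASK_GET()/EHCA_BMASK_SET() conversions for several control-block fields (P_Key indices, port numbers, the SRQ limit) that are evidently stored right-aligned and can be used as-is.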
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c108159714..0338f1fabe8a 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
                 ib_device);
         struct ib_ucontext *context = NULL;
         u64 h_ret;
-        int is_llqp = 0, has_srq = 0;
+        int is_llqp = 0, has_srq = 0, is_user = 0;
         int qp_type, max_send_sge, max_recv_sge, ret;
 
         /* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
                 }
         }
 
-        if (pd->uobject && udata)
-                context = pd->uobject->context;
-
         my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
         if (!my_qp) {
                 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
                 return ERR_PTR(-ENOMEM);
         }
 
+        if (pd->uobject && udata) {
+                is_user = 1;
+                context = pd->uobject->context;
+        }
+
         atomic_set(&my_qp->nr_events, 0);
         init_waitqueue_head(&my_qp->wait_completion);
         spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
                         (parms.squeue.is_small || parms.rqueue.is_small);
         }
 
-        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
         if (h_ret != H_SUCCESS) {
                 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
                          h_ret);
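Ownership is now visible all the way down to the hypervisor: hipz_h_alloc_resource_qp() gains an is_user argument, presumably matched by a corresponding change to the hcall wrapper outside this file.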
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
                         goto create_qp_exit2;
                 }
 
-                my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                        my_qp->ipz_squeue.qe_size;
-                my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                sizeof(struct ehca_qmap_entry));
-                if (!my_qp->sq_map.map) {
-                        ehca_err(pd->device, "Couldn't allocate squeue "
-                                 "map ret=%i", ret);
-                        goto create_qp_exit3;
+                if (!is_user) {
+                        my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+                                my_qp->ipz_squeue.qe_size;
+                        my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+                                        sizeof(struct ehca_qmap_entry));
+                        if (!my_qp->sq_map.map) {
+                                ehca_err(pd->device, "Couldn't allocate squeue "
+                                         "map ret=%i", ret);
+                                goto create_qp_exit3;
+                        }
+                        INIT_LIST_HEAD(&my_qp->sq_err_node);
+                        /* to avoid the generation of bogus flush CQEs */
+                        reset_queue_map(&my_qp->sq_map);
                 }
-                INIT_LIST_HEAD(&my_qp->sq_err_node);
-                /* to avoid the generation of bogus flush CQEs */
-                reset_queue_map(&my_qp->sq_map);
         }
 
         if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
                                  "and pages ret=%i", ret);
                         goto create_qp_exit4;
                 }
-
-                my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-                        my_qp->ipz_rqueue.qe_size;
-                my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-                                sizeof(struct ehca_qmap_entry));
-                if (!my_qp->rq_map.map) {
-                        ehca_err(pd->device, "Couldn't allocate squeue "
-                                 "map ret=%i", ret);
-                        goto create_qp_exit5;
+                if (!is_user) {
+                        my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+                                my_qp->ipz_rqueue.qe_size;
+                        my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+                                        sizeof(struct ehca_qmap_entry));
+                        if (!my_qp->rq_map.map) {
+                                ehca_err(pd->device, "Couldn't allocate squeue "
+                                         "map ret=%i", ret);
+                                goto create_qp_exit5;
+                        }
+                        INIT_LIST_HEAD(&my_qp->rq_err_node);
+                        /* to avoid the generation of bogus flush CQEs */
+                        reset_queue_map(&my_qp->rq_map);
                 }
-                INIT_LIST_HEAD(&my_qp->rq_err_node);
-                /* to avoid the generation of bogus flush CQEs */
-                reset_queue_map(&my_qp->rq_map);
-        } else if (init_attr->srq) {
+        } else if (init_attr->srq && !is_user) {
                 /* this is a base QP, use the queue map of the SRQ */
                 my_qp->rq_map = my_srq->rq_map;
                 INIT_LIST_HEAD(&my_qp->rq_err_node);
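Send and receive queues now follow the same pattern: compute the number of map entries, vmalloc() the map, and reset it, but only when the QP is kernel-owned. (The receive path's error text still reads "squeue", a pre-existing copy-paste quirk the patch leaves untouched.) A minimal standalone sketch of the guard, using hypothetical toy types and calloc() in place of the ehca structures and vmalloc():

#include <stdio.h>
#include <stdlib.h>

struct qmap_entry { unsigned short app_wr_id; unsigned int reported; };

struct queue_map {
        size_t entries;
        struct qmap_entry *map;
};

/* Allocate the software queue map only for kernel-owned QPs. */
static int init_queue_map(struct queue_map *qmap, size_t queue_length,
                          size_t qe_size, int is_user)
{
        if (is_user) {
                qmap->entries = 0;
                qmap->map = NULL;       /* teardown can test this later */
                return 0;
        }
        qmap->entries = queue_length / qe_size;
        qmap->map = calloc(qmap->entries, sizeof(*qmap->map));
        return qmap->map ? 0 : -1;
}

int main(void)
{
        struct queue_map kernel_map, user_map;

        if (init_queue_map(&kernel_map, 4096, 64, 0))
                return 1;
        init_queue_map(&user_map, 4096, 64, 1);
        printf("kernel entries=%zu user map=%p\n",
               kernel_map.entries, (void *)user_map.map);
        free(kernel_map.map);
        return 0;
}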
@@ -918,7 +923,7 @@ create_qp_exit7:
         kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-        if (HAS_RQ(my_qp))
+        if (HAS_RQ(my_qp) && !is_user)
                 vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
         ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
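The create_qp_exit* labels are the kernel's usual goto-unwind idiom: each later label undoes one earlier allocation, and the map frees are now guarded by the same !is_user test used at allocation time. A compact sketch of the idiom under those assumptions (illustrative names, plain malloc()):

#include <stdlib.h>

struct toy_qp { void *hw_res, *sq_map, *rq_map; };

static struct toy_qp *toy_create(int is_user)
{
        struct toy_qp *qp = calloc(1, sizeof(*qp));

        if (!qp)
                return NULL;
        qp->hw_res = malloc(64);                /* stands in for the hcall */
        if (!qp->hw_res)
                goto exit1;
        if (!is_user) {
                qp->sq_map = malloc(128);       /* stands in for vmalloc() */
                if (!qp->sq_map)
                        goto exit2;
                qp->rq_map = malloc(128);
                if (!qp->rq_map)
                        goto exit3;
        }
        return qp;

exit3:
        /* In the driver these labels are shared with later failure paths,
         * hence the !is_user guard even though this toy only reaches the
         * label from inside the !is_user branch. */
        if (!is_user)
                free(qp->sq_map);
exit2:
        free(qp->hw_res);
exit1:
        free(qp);
        return NULL;
}

int main(void)
{
        struct toy_qp *qp = toy_create(1);      /* user QP: no maps */

        if (!qp)
                return 1;
        free(qp->hw_res);
        free(qp);
        return 0;
}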
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         u64 update_mask;
         u64 h_ret;
         int bad_wqe_cnt = 0;
+        int is_user = 0;
         int squeue_locked = 0;
         unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ret = ehca2ib_return_code(h_ret);
                 goto modify_qp_exit1;
         }
+        if (ibqp->uobject)
+                is_user = 1;
 
         qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                         goto modify_qp_exit2;
                 }
         }
-        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+            && !is_user) {
                 ret = check_for_left_cqes(my_qp, shca);
                 if (ret)
                         goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ipz_qeit_reset(&my_qp->ipz_rqueue);
                 ipz_qeit_reset(&my_qp->ipz_squeue);
 
-                if (qp_cur_state == IB_QPS_ERR) {
+                if (qp_cur_state == IB_QPS_ERR && !is_user) {
                         del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
                         if (HAS_RQ(my_qp))
                                 del_from_err_list(my_qp->recv_cq,
                                                   &my_qp->rq_err_node);
                 }
-                reset_queue_map(&my_qp->sq_map);
+                if (!is_user)
+                        reset_queue_map(&my_qp->sq_map);
 
-                if (HAS_RQ(my_qp))
+                if (HAS_RQ(my_qp) && !is_user)
                         reset_queue_map(&my_qp->rq_map);
         }
 
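In the modify path, is_user is derived from ibqp->uobject rather than from a udata pointer: the IB core sets uobject only for QPs created through the userspace verbs interface, so a NULL test suffices. Reduced to a toy (these are not the real IB core types):

#include <stddef.h>
#include <stdio.h>

struct toy_uobject;                             /* opaque, like ib_uobject */
struct toy_ibqp { struct toy_uobject *uobject; };

static int qp_is_user(const struct toy_ibqp *qp)
{
        /* non-NULL uobject means the QP belongs to a userspace process */
        return qp->uobject != NULL;
}

int main(void)
{
        struct toy_ibqp kernel_qp = { .uobject = NULL };

        printf("is_user=%d\n", qp_is_user(&kernel_qp));
        return 0;
}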
@@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,
         qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;
         qp_attr->dest_qp_num = qpcb->dest_qp_nr;
 
-        qp_attr->pkey_index =
-                EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx);
-
-        qp_attr->port_num =
-                EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port);
-
+        qp_attr->pkey_index = qpcb->prim_p_key_idx;
+        qp_attr->port_num = qpcb->prim_phys_port;
         qp_attr->timeout = qpcb->timeout;
         qp_attr->retry_cnt = qpcb->retry_count;
         qp_attr->rnr_retry = qpcb->rnr_retry_count;
 
-        qp_attr->alt_pkey_index =
-                EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx);
-
+        qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;
         qp_attr->alt_port_num = qpcb->alt_phys_port;
         qp_attr->alt_timeout = qpcb->timeout_al;
 
@@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                 update_mask |=
                         EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
                         | EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
-                mqpcb->curr_srq_limit =
-                        EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+                mqpcb->curr_srq_limit = attr->srq_limit;
                 mqpcb->qp_aff_asyn_ev_log_reg =
                         EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
         }
@@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
 
         srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
         srq_attr->max_sge = 3;
-        srq_attr->srq_limit = EHCA_BMASK_GET(
-                MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+        srq_attr->srq_limit = qpcb->curr_srq_limit;
 
         if (ehca_debug_level >= 2)
                 ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
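The EHCA_BMASK_GET()/EHCA_BMASK_SET() macros extract and place bit ranges within 64-bit control-block words; the hunks above drop them where the field evidently occupies the low-order bits, so the masked access reduces to a plain assignment. A hypothetical reimplementation of such bit-range macros (not the actual ehca_tools.h definitions) shows why the two forms agree for a right-aligned, in-range field:

#include <stdio.h>
#include <stdint.h>

#define BMASK(shift, width)     (((shift) << 16) | (width))
#define BMASK_SHIFT(m)          (((m) >> 16) & 0xffff)
#define BMASK_BITS(m)           ((m) & 0xffff)
#define BMASK_MASK(m)           ((~0ULL >> (64 - BMASK_BITS(m))) << BMASK_SHIFT(m))
#define BMASK_GET(m, v)         (((v) & BMASK_MASK(m)) >> BMASK_SHIFT(m))
#define BMASK_SET(m, v)         (((uint64_t)(v) << BMASK_SHIFT(m)) & BMASK_MASK(m))

#define PRIM_P_KEY_IDX          BMASK(0, 16)    /* right-aligned 16-bit field */

int main(void)
{
        uint64_t raw = 0x2a;    /* value as the control block returns it */

        /* With a shift of 0 the GET is just a truncating read, so the
         * macro and the plain assignment agree on in-range values. */
        printf("masked=%llu plain=%llu\n",
               (unsigned long long)BMASK_GET(PRIM_P_KEY_IDX, raw),
               (unsigned long long)raw);
        return 0;
}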
@@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         int ret;
         u64 h_ret;
         u8 port_num;
+        int is_user = 0;
         enum ib_qp_type qp_type;
         unsigned long flags;
 
         if (uobject) {
+                is_user = 1;
                 if (my_qp->mm_count_galpa ||
                     my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
                         ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
          * SRQs will never get into an error list and do not have a recv_cq,
          * so we need to skip them here.
          */
-        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
         /* now wait until all pending events have completed */
@@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
         if (HAS_RQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-                vfree(my_qp->rq_map.map);
+                if (!is_user)
+                        vfree(my_qp->rq_map.map);
         }
         if (HAS_SQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-                vfree(my_qp->sq_map.map);
+                if (!is_user)
+                        vfree(my_qp->sq_map.map);
         }
         kmem_cache_free(qp_cache, my_qp);
         atomic_dec(&shca->num_qps);
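Note how teardown mirrors creation: the hardware queues are always destroyed via ipz_queue_dtor(), but rq_map.map and sq_map.map are vfree()d only when !is_user, i.e. only when they were vmalloc()ed in internal_create_qp() to begin with.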