author     Stefan Roscher <ossrosch@linux.vnet.ibm.com>   2009-05-13 19:52:43 -0400
committer  Roland Dreier <rolandd@cisco.com>              2009-05-13 19:52:43 -0400
commit     1988d1fa1a9d642c5714a6afc9775fba0627f3ed
tree       9eeca20f2831d8ebe32795838c1190f1ecc01017 /drivers/infiniband/hw
parent     c94f156f63c835ffc02b686f9d4238b106f31a5d
IB/ehca: Remove unnecessary memory operations for userspace queue pairs
The queue map for flush completion circumvention is only used for
kernel space queue pairs. This patch skips the allocation of the queue
maps in case the QP is created for userspace. In addition, this patch
does not iomap the galpas for kernel usage if the queue pair is only
used in userspace. These changes will improve the performance of
creation of userspace queue pairs.

Signed-off-by: Stefan Roscher <stefan.roscher@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
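The whole patch is one pattern applied consistently: derive an is_user flag
once, where the userspace context is detected, and let that flag gate every
piece of kernel-only bookkeeping on the create path, the error-unwind paths,
and the destroy path. Below is a minimal standalone sketch of that pattern in
plain C; the struct layout and the qmap_alloc() helper are simplified
stand-ins for illustration, not the driver's real ehca structures:

#include <stdlib.h>

/* Simplified stand-ins for the driver's types, for illustration only. */
struct qmap {
        size_t entries;
        void *map;
};

struct qp {
        int is_user;            /* set once at create time */
        struct qmap sq_map;     /* kernel-only flush-CQE bookkeeping */
};

/* Hypothetical helper; the driver uses vmalloc() for the real map. */
static int qmap_alloc(struct qmap *m, size_t entries, size_t entry_size)
{
        m->entries = entries;
        m->map = malloc(entries * entry_size);
        return m->map ? 0 : -1;
}

static int create_qp(struct qp *qp, int is_user, size_t entries)
{
        qp->is_user = is_user;
        /* userspace QPs never consult the queue map, so skip the work */
        if (!is_user)
                return qmap_alloc(&qp->sq_map, entries, 16);
        return 0;
}

static void destroy_qp(struct qp *qp)
{
        /* teardown mirrors creation: only free what was allocated */
        if (!qp->is_user)
                free(qp->sq_map.map);
}

The property worth noting is the symmetry: the same flag consulted at
allocation time is consulted again on every error path and in the destructor,
so a userspace QP never frees, resets, or unlinks a map it never allocated.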
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c   | 94
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c    |  6
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.h    |  2
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_phyp.c  | 11
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_phyp.h  |  2
5 files changed, 65 insertions(+), 50 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 00c108159714..ead4e718c082 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(
                                               ib_device);
         struct ib_ucontext *context = NULL;
         u64 h_ret;
-        int is_llqp = 0, has_srq = 0;
+        int is_llqp = 0, has_srq = 0, is_user = 0;
         int qp_type, max_send_sge, max_recv_sge, ret;
 
         /* h_call's out parameters */
@@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(
                 }
         }
 
-        if (pd->uobject && udata)
-                context = pd->uobject->context;
-
         my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);
         if (!my_qp) {
                 ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd);
@@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(
                 return ERR_PTR(-ENOMEM);
         }
 
+        if (pd->uobject && udata) {
+                is_user = 1;
+                context = pd->uobject->context;
+        }
+
         atomic_set(&my_qp->nr_events, 0);
         init_waitqueue_head(&my_qp->wait_completion);
         spin_lock_init(&my_qp->spinlock_s);
@@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(
                         (parms.squeue.is_small || parms.rqueue.is_small);
         }
 
-        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
+        h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);
         if (h_ret != H_SUCCESS) {
                 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",
                          h_ret);
@@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(
                         goto create_qp_exit2;
                 }
 
-                my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
-                        my_qp->ipz_squeue.qe_size;
-                my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
-                                            sizeof(struct ehca_qmap_entry));
-                if (!my_qp->sq_map.map) {
-                        ehca_err(pd->device, "Couldn't allocate squeue "
-                                 "map ret=%i", ret);
-                        goto create_qp_exit3;
+                if (!is_user) {
+                        my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length /
+                                my_qp->ipz_squeue.qe_size;
+                        my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries *
+                                                    sizeof(struct ehca_qmap_entry));
+                        if (!my_qp->sq_map.map) {
+                                ehca_err(pd->device, "Couldn't allocate squeue "
+                                         "map ret=%i", ret);
+                                goto create_qp_exit3;
+                        }
+                        INIT_LIST_HEAD(&my_qp->sq_err_node);
+                        /* to avoid the generation of bogus flush CQEs */
+                        reset_queue_map(&my_qp->sq_map);
                 }
-                INIT_LIST_HEAD(&my_qp->sq_err_node);
-                /* to avoid the generation of bogus flush CQEs */
-                reset_queue_map(&my_qp->sq_map);
         }
 
         if (HAS_RQ(my_qp)) {
@@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(
                                  "and pages ret=%i", ret);
                         goto create_qp_exit4;
                 }
-
-                my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
-                        my_qp->ipz_rqueue.qe_size;
-                my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
-                                sizeof(struct ehca_qmap_entry));
-                if (!my_qp->rq_map.map) {
-                        ehca_err(pd->device, "Couldn't allocate squeue "
-                                        "map ret=%i", ret);
-                        goto create_qp_exit5;
+                if (!is_user) {
+                        my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length /
+                                my_qp->ipz_rqueue.qe_size;
+                        my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries *
+                                        sizeof(struct ehca_qmap_entry));
+                        if (!my_qp->rq_map.map) {
+                                ehca_err(pd->device, "Couldn't allocate squeue "
+                                                "map ret=%i", ret);
+                                goto create_qp_exit5;
+                        }
+                        INIT_LIST_HEAD(&my_qp->rq_err_node);
+                        /* to avoid the generation of bogus flush CQEs */
+                        reset_queue_map(&my_qp->rq_map);
                 }
-                INIT_LIST_HEAD(&my_qp->rq_err_node);
-                /* to avoid the generation of bogus flush CQEs */
-                reset_queue_map(&my_qp->rq_map);
-        } else if (init_attr->srq) {
+        } else if (init_attr->srq && !is_user) {
                 /* this is a base QP, use the queue map of the SRQ */
                 my_qp->rq_map = my_srq->rq_map;
                 INIT_LIST_HEAD(&my_qp->rq_err_node);
@@ -918,7 +923,7 @@ create_qp_exit7:
         kfree(my_qp->mod_qp_parm);
 
 create_qp_exit6:
-        if (HAS_RQ(my_qp))
+        if (HAS_RQ(my_qp) && !is_user)
                 vfree(my_qp->rq_map.map);
 
 create_qp_exit5:
@@ -926,7 +931,7 @@ create_qp_exit5:
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
 
 create_qp_exit4:
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 vfree(my_qp->sq_map.map);
 
 create_qp_exit3:
@@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         u64 update_mask;
         u64 h_ret;
         int bad_wqe_cnt = 0;
+        int is_user = 0;
         int squeue_locked = 0;
         unsigned long flags = 0;
 
@@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ret = ehca2ib_return_code(h_ret);
                 goto modify_qp_exit1;
         }
+        if (ibqp->uobject)
+                is_user = 1;
 
         qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state);
 
@@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                         goto modify_qp_exit2;
                 }
         }
-        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) {
+        if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)
+            && !is_user) {
                 ret = check_for_left_cqes(my_qp, shca);
                 if (ret)
                         goto modify_qp_exit2;
@@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 ipz_qeit_reset(&my_qp->ipz_rqueue);
                 ipz_qeit_reset(&my_qp->ipz_squeue);
 
-                if (qp_cur_state == IB_QPS_ERR) {
+                if (qp_cur_state == IB_QPS_ERR && !is_user) {
                         del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
                         if (HAS_RQ(my_qp))
                                 del_from_err_list(my_qp->recv_cq,
                                                   &my_qp->rq_err_node);
                 }
-                reset_queue_map(&my_qp->sq_map);
+                if (!is_user)
+                        reset_queue_map(&my_qp->sq_map);
 
-                if (HAS_RQ(my_qp))
+                if (HAS_RQ(my_qp) && !is_user)
                         reset_queue_map(&my_qp->rq_map);
         }
 
@@ -2138,10 +2148,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         int ret;
         u64 h_ret;
         u8 port_num;
+        int is_user = 0;
         enum ib_qp_type qp_type;
         unsigned long flags;
 
         if (uobject) {
+                is_user = 1;
                 if (my_qp->mm_count_galpa ||
                     my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
                         ehca_err(dev, "Resources still referenced in "
@@ -2168,10 +2180,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
          * SRQs will never get into an error list and do not have a recv_cq,
          * so we need to skip them here.
          */
-        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp))
+        if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node);
 
-        if (HAS_SQ(my_qp))
+        if (HAS_SQ(my_qp) && !is_user)
                 del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);
 
         /* now wait until all pending events have completed */
@@ -2209,13 +2221,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
 
         if (HAS_RQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
-
-                vfree(my_qp->rq_map.map);
+                if (!is_user)
+                        vfree(my_qp->rq_map.map);
         }
         if (HAS_SQ(my_qp)) {
                 ipz_queue_dtor(my_pd, &my_qp->ipz_squeue);
-
-                vfree(my_qp->sq_map.map);
+                if (!is_user)
+                        vfree(my_qp->sq_map.map);
         }
         kmem_cache_free(qp_cache, my_qp);
         atomic_dec(&shca->num_qps);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index d0ab0c0d5e91..4d5dc3304d42 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -284,7 +284,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
         param->act_pages = (u32)outs[4];
 
         if (ret == H_SUCCESS)
-                hcp_galpas_ctor(&cq->galpas, outs[5], outs[6]);
+                hcp_galpas_ctor(&cq->galpas, 0, outs[5], outs[6]);
 
         if (ret == H_NOT_ENOUGH_RESOURCES)
                 ehca_gen_err("Not enough resources. ret=%lli", ret);
@@ -293,7 +293,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
 }
 
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                             struct ehca_alloc_qp_parms *parms)
+                             struct ehca_alloc_qp_parms *parms, int is_user)
 {
         u64 ret;
         u64 allocate_controls, max_r10_reg, r11, r12;
@@ -359,7 +359,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
                 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
 
         if (ret == H_SUCCESS)
-                hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
+                hcp_galpas_ctor(&parms->galpas, is_user, outs[6], outs[6]);
 
         if (ret == H_NOT_ENOUGH_RESOURCES)
                 ehca_gen_err("Not enough resources. ret=%lli", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2c3c6e0ea5c2..39c1c3618ec7 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,7 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
  * initialize resources, create empty QPPTs (2 rings).
  */
 u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
-                             struct ehca_alloc_qp_parms *parms);
+                             struct ehca_alloc_qp_parms *parms, int is_user);
 
 u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
                       const u8 port_id,
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.c b/drivers/infiniband/hw/ehca/hcp_phyp.c
index 214821095cb1..b3e0e72e8a73 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.c
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.c
@@ -54,12 +54,15 @@ int hcall_unmap_page(u64 mapaddr)
         return 0;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                     u64 paddr_kernel, u64 paddr_user)
 {
-        int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
-        if (ret)
-                return ret;
+        if (!is_user) {
+                int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
+                if (ret)
+                        return ret;
+        } else
+                galpas->kernel.fw_handle = 0;
 
         galpas->user.fw_handle = paddr_user;
 
diff --git a/drivers/infiniband/hw/ehca/hcp_phyp.h b/drivers/infiniband/hw/ehca/hcp_phyp.h
index 5305c2a3ed94..204227d5303a 100644
--- a/drivers/infiniband/hw/ehca/hcp_phyp.h
+++ b/drivers/infiniband/hw/ehca/hcp_phyp.h
@@ -78,7 +78,7 @@ static inline void hipz_galpa_store(struct h_galpa galpa, u32 offset, u64 value)
         *(volatile u64 __force *)addr = value;
 }
 
-int hcp_galpas_ctor(struct h_galpas *galpas,
+int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                     u64 paddr_kernel, u64 paddr_user);
 
 int hcp_galpas_dtor(struct h_galpas *galpas);
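
The hcp_phyp.c change applies the same create/destroy symmetry to the
firmware page mapping: the kernel galpa is mapped through hcall_map_page()
only when the QP will be driven from kernel space, and a zero fw_handle marks
the mapping as skipped. A rough standalone sketch of how constructor and
destructor pair up under that convention; the hcall_* stubs stand in for the
real hypervisor-call wrappers, and the dtor body is an assumption inferred
from the zero-handle convention rather than code quoted from the driver:

#include <stdint.h>

typedef uint64_t u64;

struct h_galpa { u64 fw_handle; };
struct h_galpas { struct h_galpa user, kernel; };

/* Stubs standing in for the real hypervisor-call wrappers. */
static int hcall_map_page(u64 paddr, u64 *handle) { *handle = paddr; return 0; }
static int hcall_unmap_page(u64 handle) { (void)handle; return 0; }

int hcp_galpas_ctor(struct h_galpas *galpas, int is_user,
                    u64 paddr_kernel, u64 paddr_user)
{
        if (!is_user) {
                int ret = hcall_map_page(paddr_kernel, &galpas->kernel.fw_handle);
                if (ret)
                        return ret;
        } else
                galpas->kernel.fw_handle = 0;   /* marks "never mapped" */

        galpas->user.fw_handle = paddr_user;
        return 0;
}

int hcp_galpas_dtor(struct h_galpas *galpas)
{
        /* a zero handle means the ctor skipped the mapping: nothing to undo */
        if (galpas->kernel.fw_handle) {
                int ret = hcall_unmap_page(galpas->kernel.fw_handle);
                if (ret)
                        return ret;
                galpas->kernel.fw_handle = 0;
        }
        return 0;
}

With is_user passed down from hipz_h_alloc_resource_qp(), a userspace-only QP
never pays for a mapping of a kernel galpa it will not touch, which is the
saving the commit message describes.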