author     Joachim Fenkes <fenkes@de.ibm.com>      2007-07-09 09:25:10 -0400
committer  Roland Dreier <rolandd@cisco.com>       2007-07-09 23:12:27 -0400
commit     a6a12947fbf4a1782535468d756b0d44babf9760 (patch)
tree       4819ad9abd03f92e51c076745e7068028d06b105 /drivers/infiniband/hw
parent     9a79fc0a1b815cbd05a8e37ea838acfccb7235cc (diff)
IB/ehca: add Shared Receive Queue support
Support SRQs on eHCA2. Since an SRQ is a QP for eHCA2, a lot of code
(structures, create, destroy, post_recv) can be shared between QP and SRQ.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h          |  26
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes_pSeries.h  |   4
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h           |  15
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c             |  16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c               | 451
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c             |  47
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_uverbs.c           |   4
-rw-r--r--  drivers/infiniband/hw/ehca/hcp_if.c                |  23
-rw-r--r--  drivers/infiniband/hw/ehca/hipz_hw.h               |   1
9 files changed, 480 insertions(+), 107 deletions(-)
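
The commit message's "a lot of code can be shared" hinges on one struct backing
both verbs objects, as the ehca_classes.h hunk below shows. A minimal
standalone sketch of that pattern, with simplified stand-ins for the kernel
types (not the driver's actual definitions):

#include <stddef.h>

/* Simplified stand-ins for the kernel's ib_qp / ib_srq handles. */
struct ib_qp  { unsigned int qp_num; };
struct ib_srq { void *srq_context; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/*
 * One ehca_qp backs both handles: the anonymous union lets shared code
 * (create, destroy, post_recv) work on the same object no matter which
 * handle the caller holds.
 */
struct ehca_qp {
	union {
		struct ib_qp ib_qp;
		struct ib_srq ib_srq;
	};
	unsigned int real_qp_num;
};

/* Either embedded handle recovers the same ehca_qp. */
static struct ehca_qp *from_qp(struct ib_qp *qp)
{
	return container_of(qp, struct ehca_qp, ib_qp);
}

static struct ehca_qp *from_srq(struct ib_srq *srq)
{
	return container_of(srq, struct ehca_qp, ib_srq);
}
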
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 6e75db68996e..9d689aeb928a 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -5,6 +5,7 @@
  *
  *  Authors: Heiko J Schick <schickhj@de.ibm.com>
  *           Christoph Raisch <raisch@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
  *
@@ -117,9 +118,20 @@ struct ehca_pd {
 	u32 ownpid;
 };
 
+enum ehca_ext_qp_type {
+	EQPT_NORMAL = 0,
+	EQPT_LLQP = 1,
+	EQPT_SRQBASE = 2,
+	EQPT_SRQ = 3,
+};
+
 struct ehca_qp {
-	struct ib_qp ib_qp;
+	union {
+		struct ib_qp ib_qp;
+		struct ib_srq ib_srq;
+	};
 	u32 qp_type;
+	enum ehca_ext_qp_type ext_type;
 	struct ipz_queue ipz_squeue;
 	struct ipz_queue ipz_rqueue;
 	struct h_galpas galpas;
@@ -142,6 +154,10 @@ struct ehca_qp {
 	u32 mm_count_galpa;
 };
 
+#define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
+#define HAS_SQ(qp) (qp->ext_type != EQPT_SRQ)
+#define HAS_RQ(qp) (qp->ext_type != EQPT_SRQBASE)
+
 /* must be power of 2 */
 #define QP_HASHTAB_LEN 8
 
@@ -307,6 +323,7 @@ struct ehca_create_qp_resp {
 	u32 qp_num;
 	u32 token;
 	u32 qp_type;
+	u32 ext_type;
 	u32 qkey;
 	/* qp_num assigned by ehca: sqp0/1 may have got different numbers */
 	u32 real_qp_num;
@@ -329,13 +346,6 @@ enum ehca_service_type {
 	ST_UD = 3,
 };
 
-enum ehca_ext_qp_type {
-	EQPT_NORMAL = 0,
-	EQPT_LLQP = 1,
-	EQPT_SRQBASE = 2,
-	EQPT_SRQ = 3,
-};
-
 enum ehca_ll_comp_flags {
 	LLQP_SEND_COMP = 0x20,
 	LLQP_RECV_COMP = 0x40,
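
The hunk above also introduces the ext_type plumbing that the rest of the
patch keys off. A reading aid for what each variant owns under the new
IS_SRQ/HAS_SQ/HAS_RQ macros (ours, not code from the patch):

/*
 *   ext_type       HAS_SQ  HAS_RQ  IS_SRQ
 *   EQPT_NORMAL    yes     yes     no     ordinary QP
 *   EQPT_LLQP      yes     yes     no     low-latency QP
 *   EQPT_SRQBASE   yes     no      no     QP whose receives come from an SRQ
 *   EQPT_SRQ       no      yes     yes    the SRQ itself (a QP to eHCA2)
 */
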
diff --git a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
index 5665f213b81a..fb3df5c271e7 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes_pSeries.h
@@ -228,8 +228,8 @@ struct hcp_modify_qp_control_block {
 #define MQPCB_QP_NUMBER                 EHCA_BMASK_IBM(8,31)
 #define MQPCB_MASK_QP_ENABLE            EHCA_BMASK_IBM(48,48)
 #define MQPCB_QP_ENABLE                 EHCA_BMASK_IBM(31,31)
-#define MQPCB_MASK_CURR_SQR_LIMIT       EHCA_BMASK_IBM(49,49)
-#define MQPCB_CURR_SQR_LIMIT            EHCA_BMASK_IBM(15,31)
+#define MQPCB_MASK_CURR_SRQ_LIMIT       EHCA_BMASK_IBM(49,49)
+#define MQPCB_CURR_SRQ_LIMIT            EHCA_BMASK_IBM(16,31)
 #define MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG EHCA_BMASK_IBM(50,50)
 #define MQPCB_MASK_SHARED_RQ_HNDL       EHCA_BMASK_IBM(51,51)
 
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 37e7fe0908cf..fd84a804814c 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -154,6 +154,21 @@ int ehca_post_send(struct ib_qp *qp, struct ib_send_wr *send_wr,
 int ehca_post_recv(struct ib_qp *qp, struct ib_recv_wr *recv_wr,
 		   struct ib_recv_wr **bad_recv_wr);
 
+int ehca_post_srq_recv(struct ib_srq *srq,
+		       struct ib_recv_wr *recv_wr,
+		       struct ib_recv_wr **bad_recv_wr);
+
+struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+			       struct ib_srq_init_attr *init_attr,
+			       struct ib_udata *udata);
+
+int ehca_modify_srq(struct ib_srq *srq, struct ib_srq_attr *attr,
+		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
+
+int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr);
+
+int ehca_destroy_srq(struct ib_srq *srq);
+
 u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ibqp,
 		    struct ib_qp_init_attr *qp_init_attr);
 
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index eb22a6b296d9..ca215bac9742 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -343,7 +343,7 @@ int ehca_init_device(struct ehca_shca *shca)
 	strlcpy(shca->ib_device.name, "ehca%d", IB_DEVICE_NAME_MAX);
 	shca->ib_device.owner = THIS_MODULE;
 
-	shca->ib_device.uverbs_abi_ver = 6;
+	shca->ib_device.uverbs_abi_ver = 7;
 	shca->ib_device.uverbs_cmd_mask =
 		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
 		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
@@ -411,6 +411,20 @@ int ehca_init_device(struct ehca_shca *shca)
 	/* shca->ib_device.process_mad = ehca_process_mad; */
 	shca->ib_device.mmap = ehca_mmap;
 
+	if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) {
+		shca->ib_device.uverbs_cmd_mask |=
+			(1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
+			(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
+			(1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
+			(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);
+
+		shca->ib_device.create_srq = ehca_create_srq;
+		shca->ib_device.modify_srq = ehca_modify_srq;
+		shca->ib_device.query_srq = ehca_query_srq;
+		shca->ib_device.destroy_srq = ehca_destroy_srq;
+		shca->ib_device.post_srq_recv = ehca_post_srq_recv;
+	}
+
 	return ret;
 }
 
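
Since the SRQ verbs are registered only when HCA_CAP_SRQ is set, a kernel
consumer reaches them through the stock verbs entry points. A hedged sketch
of such a caller; the pd argument and queue sizes are placeholders, and
ib_create_srq() is the standard ib_verbs call that dispatches to
ehca_create_srq() below:

#include <rdma/ib_verbs.h>

static struct ib_srq *make_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr srq_attr = {
		.attr = {
			.max_wr    = 256, /* outstanding recv WRs */
			.max_sge   = 3,   /* the patch caps SRQ SGEs at 3 */
			.srq_limit = 0,   /* no limit event armed yet */
		},
	};

	/* Returns an ERR_PTR on failure, like the driver functions below. */
	return ib_create_srq(pd, &srq_attr);
}
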
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 513471a7bffa..6f35f07dc02c 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -3,7 +3,9 @@
  *
  *  QP functions
  *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
+ *  Authors: Joachim Fenkes <fenkes@de.ibm.com>
+ *           Stefan Roscher <stefan.roscher@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
  *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  *           Reinhard Ernst <rernst@de.ibm.com>
  *           Heiko J Schick <schickhj@de.ibm.com>
@@ -261,6 +263,19 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
 }
 
 /*
+ * init userspace queue info from ipz_queue data
+ */
+static inline void queue2resp(struct ipzu_queue_resp *resp,
+			      struct ipz_queue *queue)
+{
+	resp->qe_size = queue->qe_size;
+	resp->act_nr_of_sg = queue->act_nr_of_sg;
+	resp->queue_length = queue->queue_length;
+	resp->pagesize = queue->pagesize;
+	resp->toggle_state = queue->toggle_state;
+}
+
+/*
  * init_qp_queue initializes/constructs r/squeue and registers queue pages.
  */
 static inline int init_qp_queue(struct ehca_shca *shca,
@@ -338,11 +353,17 @@ init_qp_queue1:
 	return ret;
 }
 
-struct ib_qp *ehca_create_qp(struct ib_pd *pd,
-			     struct ib_qp_init_attr *init_attr,
-			     struct ib_udata *udata)
+/*
+ * Create an ib_qp struct that is either a QP or an SRQ, depending on
+ * the value of the is_srq parameter. If init_attr and srq_init_attr share
+ * fields, the field out of init_attr is used.
+ */
+struct ehca_qp *internal_create_qp(struct ib_pd *pd,
+				   struct ib_qp_init_attr *init_attr,
+				   struct ib_srq_init_attr *srq_init_attr,
+				   struct ib_udata *udata, int is_srq)
 {
-	static int da_rc_msg_size[]={ 128, 256, 512, 1024, 2048, 4096 };
+	static int da_rc_msg_size[] = { 128, 256, 512, 1024, 2048, 4096 };
 	static int da_ud_sq_msg_size[]={ 128, 384, 896, 1920, 3968 };
 	struct ehca_qp *my_qp;
 	struct ehca_pd *my_pd = container_of(pd, struct ehca_pd, ib_pd);
@@ -355,7 +376,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 
 	/* h_call's out parameters */
 	struct ehca_alloc_qp_parms parms;
-	u32 swqe_size = 0, rwqe_size = 0;
+	u32 swqe_size = 0, rwqe_size = 0, ib_qp_num;
 	unsigned long flags;
 
 	memset(&parms, 0, sizeof(parms));
@@ -376,13 +397,34 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	}
 	qp_type &= 0x1F;
 
-	/* check for SRQ */
-	has_srq = !!(init_attr->srq);
+	/* handle SRQ base QPs */
+	if (init_attr->srq) {
+		struct ehca_qp *my_srq =
+			container_of(init_attr->srq, struct ehca_qp, ib_srq);
+
+		has_srq = 1;
+		parms.ext_type = EQPT_SRQBASE;
+		parms.srq_qpn = my_srq->real_qp_num;
+		parms.srq_token = my_srq->token;
+	}
+
 	if (is_llqp && has_srq) {
 		ehca_err(pd->device, "LLQPs can't have an SRQ");
 		return ERR_PTR(-EINVAL);
 	}
 
+	/* handle SRQs */
+	if (is_srq) {
+		parms.ext_type = EQPT_SRQ;
+		parms.srq_limit = srq_init_attr->attr.srq_limit;
+		if (init_attr->cap.max_recv_sge > 3) {
+			ehca_err(pd->device, "no more than three SGEs "
+				 "supported for SRQ pd=%p max_sge=%x",
+				 pd, init_attr->cap.max_recv_sge);
+			return ERR_PTR(-EINVAL);
+		}
+	}
+
 	/* check QP type */
 	if (qp_type != IB_QPT_UD &&
 	    qp_type != IB_QPT_UC &&
@@ -423,11 +465,15 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 
 	spin_lock_init(&my_qp->spinlock_s);
 	spin_lock_init(&my_qp->spinlock_r);
+	my_qp->qp_type = qp_type;
+	my_qp->ext_type = parms.ext_type;
 
-	my_qp->recv_cq =
-		container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
-	my_qp->send_cq =
-		container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
+	if (init_attr->recv_cq)
+		my_qp->recv_cq =
+			container_of(init_attr->recv_cq, struct ehca_cq, ib_cq);
+	if (init_attr->send_cq)
+		my_qp->send_cq =
+			container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
 
 	do {
 		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
@@ -471,8 +517,10 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	parms.token = my_qp->token;
 	parms.eq_handle = shca->eq.ipz_eq_handle;
 	parms.pd = my_pd->fw_pd;
-	parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
-	parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
+	if (my_qp->send_cq)
+		parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
+	if (my_qp->recv_cq)
+		parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
 
 	parms.max_send_wr = init_attr->cap.max_send_wr;
 	parms.max_recv_wr = init_attr->cap.max_recv_wr;
@@ -487,7 +535,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		goto create_qp_exit1;
 	}
 
-	my_qp->ib_qp.qp_num = my_qp->real_qp_num = parms.real_qp_num;
+	ib_qp_num = my_qp->real_qp_num = parms.real_qp_num;
 	my_qp->ipz_qp_handle = parms.qp_handle;
 	my_qp->galpas = parms.galpas;
 
@@ -535,7 +583,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
 			parms.act_nr_send_sges = init_attr->cap.max_send_sge;
 			parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
-			my_qp->ib_qp.qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
+			ib_qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
 		}
 
 		break;
@@ -545,36 +593,51 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 	}
 
 	/* initialize r/squeue and register queue pages */
-	ret = init_qp_queue(shca, my_qp, &my_qp->ipz_squeue, 0,
-			    has_srq ? H_SUCCESS : H_PAGE_REGISTERED,
-			    parms.nr_sq_pages, swqe_size,
-			    parms.act_nr_send_sges);
-	if (ret) {
-		ehca_err(pd->device,
-			 "Couldn't initialize squeue and pages ret=%x", ret);
-		goto create_qp_exit2;
+	if (HAS_SQ(my_qp)) {
+		ret = init_qp_queue(
+			shca, my_qp, &my_qp->ipz_squeue, 0,
+			HAS_RQ(my_qp) ? H_PAGE_REGISTERED : H_SUCCESS,
+			parms.nr_sq_pages, swqe_size,
+			parms.act_nr_send_sges);
+		if (ret) {
+			ehca_err(pd->device, "Couldn't initialize squeue "
+				 "and pages ret=%x", ret);
+			goto create_qp_exit2;
+		}
 	}
 
-	ret = init_qp_queue(shca, my_qp, &my_qp->ipz_rqueue, 1, H_SUCCESS,
-			    parms.nr_rq_pages, rwqe_size,
-			    parms.act_nr_recv_sges);
-	if (ret) {
-		ehca_err(pd->device,
-			 "Couldn't initialize rqueue and pages ret=%x", ret);
-		goto create_qp_exit3;
+	if (HAS_RQ(my_qp)) {
+		ret = init_qp_queue(
+			shca, my_qp, &my_qp->ipz_rqueue, 1,
+			H_SUCCESS, parms.nr_rq_pages, rwqe_size,
+			parms.act_nr_recv_sges);
+		if (ret) {
+			ehca_err(pd->device, "Couldn't initialize rqueue "
+				 "and pages ret=%x", ret);
+			goto create_qp_exit3;
+		}
 	}
 
-	my_qp->ib_qp.pd = &my_pd->ib_pd;
-	my_qp->ib_qp.device = my_pd->ib_pd.device;
+	if (is_srq) {
+		my_qp->ib_srq.pd = &my_pd->ib_pd;
+		my_qp->ib_srq.device = my_pd->ib_pd.device;
 
-	my_qp->ib_qp.recv_cq = init_attr->recv_cq;
-	my_qp->ib_qp.send_cq = init_attr->send_cq;
+		my_qp->ib_srq.srq_context = init_attr->qp_context;
+		my_qp->ib_srq.event_handler = init_attr->event_handler;
+	} else {
+		my_qp->ib_qp.qp_num = ib_qp_num;
+		my_qp->ib_qp.pd = &my_pd->ib_pd;
+		my_qp->ib_qp.device = my_pd->ib_pd.device;
 
-	my_qp->ib_qp.qp_type = my_qp->qp_type = qp_type;
-	my_qp->ib_qp.srq = init_attr->srq;
+		my_qp->ib_qp.recv_cq = init_attr->recv_cq;
+		my_qp->ib_qp.send_cq = init_attr->send_cq;
 
-	my_qp->ib_qp.qp_context = init_attr->qp_context;
-	my_qp->ib_qp.event_handler = init_attr->event_handler;
+		my_qp->ib_qp.qp_type = qp_type;
+		my_qp->ib_qp.srq = init_attr->srq;
+
+		my_qp->ib_qp.qp_context = init_attr->qp_context;
+		my_qp->ib_qp.event_handler = init_attr->event_handler;
+	}
 
 	init_attr->cap.max_inline_data = 0; /* not supported yet */
 	init_attr->cap.max_recv_sge = parms.act_nr_recv_sges;
@@ -593,41 +656,32 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 			goto create_qp_exit4;
 		}
 	}
-	if (init_attr->send_cq) {
-		struct ehca_cq *cq = container_of(init_attr->send_cq,
-						  struct ehca_cq, ib_cq);
-		ret = ehca_cq_assign_qp(cq, my_qp);
+
+	if (my_qp->send_cq) {
+		ret = ehca_cq_assign_qp(my_qp->send_cq, my_qp);
 		if (ret) {
 			ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
 				 ret);
 			goto create_qp_exit4;
 		}
-		my_qp->send_cq = cq;
 	}
+
 	/* copy queues, galpa data to user space */
 	if (context && udata) {
-		struct ipz_queue *ipz_rqueue = &my_qp->ipz_rqueue;
-		struct ipz_queue *ipz_squeue = &my_qp->ipz_squeue;
 		struct ehca_create_qp_resp resp;
 		memset(&resp, 0, sizeof(resp));
 
 		resp.qp_num = my_qp->real_qp_num;
 		resp.token = my_qp->token;
 		resp.qp_type = my_qp->qp_type;
+		resp.ext_type = my_qp->ext_type;
 		resp.qkey = my_qp->qkey;
 		resp.real_qp_num = my_qp->real_qp_num;
-		/* rqueue properties */
-		resp.ipz_rqueue.qe_size = ipz_rqueue->qe_size;
-		resp.ipz_rqueue.act_nr_of_sg = ipz_rqueue->act_nr_of_sg;
-		resp.ipz_rqueue.queue_length = ipz_rqueue->queue_length;
-		resp.ipz_rqueue.pagesize = ipz_rqueue->pagesize;
-		resp.ipz_rqueue.toggle_state = ipz_rqueue->toggle_state;
-		/* squeue properties */
-		resp.ipz_squeue.qe_size = ipz_squeue->qe_size;
-		resp.ipz_squeue.act_nr_of_sg = ipz_squeue->act_nr_of_sg;
-		resp.ipz_squeue.queue_length = ipz_squeue->queue_length;
-		resp.ipz_squeue.pagesize = ipz_squeue->pagesize;
-		resp.ipz_squeue.toggle_state = ipz_squeue->toggle_state;
+		if (HAS_SQ(my_qp))
+			queue2resp(&resp.ipz_squeue, &my_qp->ipz_squeue);
+		if (HAS_RQ(my_qp))
+			queue2resp(&resp.ipz_rqueue, &my_qp->ipz_rqueue);
+
 		if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
 			ehca_err(pd->device, "Copy to udata failed");
 			ret = -EINVAL;
@@ -635,13 +689,15 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
 		}
 	}
 
-	return &my_qp->ib_qp;
+	return my_qp;
 
 create_qp_exit4:
-	ipz_queue_dtor(&my_qp->ipz_rqueue);
+	if (HAS_RQ(my_qp))
+		ipz_queue_dtor(&my_qp->ipz_rqueue);
 
 create_qp_exit3:
-	ipz_queue_dtor(&my_qp->ipz_squeue);
+	if (HAS_SQ(my_qp))
+		ipz_queue_dtor(&my_qp->ipz_squeue);
 
 create_qp_exit2:
 	hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
@@ -656,6 +712,114 @@ create_qp_exit0:
 	return ERR_PTR(ret);
 }
 
+struct ib_qp *ehca_create_qp(struct ib_pd *pd,
+			     struct ib_qp_init_attr *qp_init_attr,
+			     struct ib_udata *udata)
+{
+	struct ehca_qp *ret;
+
+	ret = internal_create_qp(pd, qp_init_attr, NULL, udata, 0);
+	return IS_ERR(ret) ? (struct ib_qp *) ret : &ret->ib_qp;
+}
+
+int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+			struct ib_uobject *uobject);
+
+struct ib_srq *ehca_create_srq(struct ib_pd *pd,
+			       struct ib_srq_init_attr *srq_init_attr,
+			       struct ib_udata *udata)
+{
+	struct ib_qp_init_attr qp_init_attr;
+	struct ehca_qp *my_qp;
+	struct ib_srq *ret;
+	struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
+					      ib_device);
+	struct hcp_modify_qp_control_block *mqpcb;
+	u64 hret, update_mask;
+
+	/* For common attributes, internal_create_qp() takes its info
+	 * out of qp_init_attr, so copy all common attrs there.
+	 */
+	memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+	qp_init_attr.event_handler = srq_init_attr->event_handler;
+	qp_init_attr.qp_context = srq_init_attr->srq_context;
+	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
+	qp_init_attr.qp_type = IB_QPT_RC;
+	qp_init_attr.cap.max_recv_wr = srq_init_attr->attr.max_wr;
+	qp_init_attr.cap.max_recv_sge = srq_init_attr->attr.max_sge;
+
+	my_qp = internal_create_qp(pd, &qp_init_attr, srq_init_attr, udata, 1);
+	if (IS_ERR(my_qp))
+		return (struct ib_srq *) my_qp;
+
+	/* copy back return values */
+	srq_init_attr->attr.max_wr = qp_init_attr.cap.max_recv_wr;
+	srq_init_attr->attr.max_sge = qp_init_attr.cap.max_recv_sge;
+
+	/* drive SRQ into RTR state */
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	if (!mqpcb) {
+		ehca_err(pd->device, "Could not get zeroed page for mqpcb "
+			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
+		ret = ERR_PTR(-ENOMEM);
+		goto create_srq1;
+	}
+
+	mqpcb->qp_state = EHCA_QPS_INIT;
+	mqpcb->prim_phys_port = 1;
+	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+				my_qp->ipz_qp_handle,
+				&my_qp->pf,
+				update_mask,
+				mqpcb, my_qp->galpas.kernel);
+	if (hret != H_SUCCESS) {
+		ehca_err(pd->device, "Could not modify SRQ to INIT"
+			 "ehca_qp=%p qp_num=%x hret=%lx",
+			 my_qp, my_qp->real_qp_num, hret);
+		goto create_srq2;
+	}
+
+	mqpcb->qp_enable = 1;
+	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_ENABLE, 1);
+	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+				my_qp->ipz_qp_handle,
+				&my_qp->pf,
+				update_mask,
+				mqpcb, my_qp->galpas.kernel);
+	if (hret != H_SUCCESS) {
+		ehca_err(pd->device, "Could not enable SRQ"
+			 "ehca_qp=%p qp_num=%x hret=%lx",
+			 my_qp, my_qp->real_qp_num, hret);
+		goto create_srq2;
+	}
+
+	mqpcb->qp_state = EHCA_QPS_RTR;
+	update_mask = EHCA_BMASK_SET(MQPCB_MASK_QP_STATE, 1);
+	hret = hipz_h_modify_qp(shca->ipz_hca_handle,
+				my_qp->ipz_qp_handle,
+				&my_qp->pf,
+				update_mask,
+				mqpcb, my_qp->galpas.kernel);
+	if (hret != H_SUCCESS) {
+		ehca_err(pd->device, "Could not modify SRQ to RTR"
+			 "ehca_qp=%p qp_num=%x hret=%lx",
+			 my_qp, my_qp->real_qp_num, hret);
+		goto create_srq2;
+	}
+
+	return &my_qp->ib_srq;
+
+create_srq2:
+	ret = ERR_PTR(ehca2ib_return_code(hret));
+	ehca_free_fw_ctrlblock(mqpcb);
+
+create_srq1:
+	internal_destroy_qp(pd->device, my_qp, my_qp->ib_srq.uobject);
+
+	return ret;
+}
+
 /*
  * prepare_sqe_rts called by internal_modify_qp() at trans sqe -> rts
  * set purge bit of bad wqe and subsequent wqes to avoid reentering sqe
@@ -1341,42 +1505,159 @@ query_qp_exit1:
 	return ret;
 }
 
-int ehca_destroy_qp(struct ib_qp *ibqp)
+int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		    enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
 {
-	struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
-	struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+	struct ehca_qp *my_qp =
+		container_of(ibsrq, struct ehca_qp, ib_srq);
+	struct ehca_pd *my_pd =
+		container_of(ibsrq->pd, struct ehca_pd, ib_pd);
+	struct ehca_shca *shca =
+		container_of(ibsrq->pd->device, struct ehca_shca, ib_device);
+	struct hcp_modify_qp_control_block *mqpcb;
+	u64 update_mask;
+	u64 h_ret;
+	int ret = 0;
+
+	u32 cur_pid = current->tgid;
+	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+	    my_pd->ownpid != cur_pid) {
+		ehca_err(ibsrq->pd->device, "Invalid caller pid=%x ownpid=%x",
+			 cur_pid, my_pd->ownpid);
+		return -EINVAL;
+	}
+
+	mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	if (!mqpcb) {
+		ehca_err(ibsrq->device, "Could not get zeroed page for mqpcb "
+			 "ehca_qp=%p qp_num=%x ", my_qp, my_qp->real_qp_num);
+		return -ENOMEM;
+	}
+
+	update_mask = 0;
+	if (attr_mask & IB_SRQ_LIMIT) {
+		attr_mask &= ~IB_SRQ_LIMIT;
+		update_mask |=
+			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)
+			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1);
+		mqpcb->curr_srq_limit =
+			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit);
+		mqpcb->qp_aff_asyn_ev_log_reg =
+			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);
+	}
+
+	/* by now, all bits in attr_mask should have been cleared */
+	if (attr_mask) {
+		ehca_err(ibsrq->device, "invalid attribute mask bits set "
+			 "attr_mask=%x", attr_mask);
+		ret = -EINVAL;
+		goto modify_srq_exit0;
+	}
+
+	if (ehca_debug_level)
+		ehca_dmp(mqpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
+
+	h_ret = hipz_h_modify_qp(shca->ipz_hca_handle, my_qp->ipz_qp_handle,
+				 NULL, update_mask, mqpcb,
+				 my_qp->galpas.kernel);
+
+	if (h_ret != H_SUCCESS) {
+		ret = ehca2ib_return_code(h_ret);
+		ehca_err(ibsrq->device, "hipz_h_modify_qp() failed rc=%lx "
+			 "ehca_qp=%p qp_num=%x",
+			 h_ret, my_qp, my_qp->real_qp_num);
+	}
+
+modify_srq_exit0:
+	ehca_free_fw_ctrlblock(mqpcb);
+
+	return ret;
+}
+
+int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)
+{
+	struct ehca_qp *my_qp = container_of(srq, struct ehca_qp, ib_srq);
+	struct ehca_pd *my_pd = container_of(srq->pd, struct ehca_pd, ib_pd);
+	struct ehca_shca *shca = container_of(srq->device, struct ehca_shca,
 					      ib_device);
+	struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle;
+	struct hcp_modify_qp_control_block *qpcb;
+	u32 cur_pid = current->tgid;
+	int ret = 0;
+	u64 h_ret;
+
+	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
+	    my_pd->ownpid != cur_pid) {
+		ehca_err(srq->device, "Invalid caller pid=%x ownpid=%x",
+			 cur_pid, my_pd->ownpid);
+		return -EINVAL;
+	}
+
+	qpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+	if (!qpcb) {
+		ehca_err(srq->device, "Out of memory for qpcb "
+			 "ehca_qp=%p qp_num=%x", my_qp, my_qp->real_qp_num);
+		return -ENOMEM;
+	}
+
+	h_ret = hipz_h_query_qp(adapter_handle, my_qp->ipz_qp_handle,
+				NULL, qpcb, my_qp->galpas.kernel);
+
+	if (h_ret != H_SUCCESS) {
+		ret = ehca2ib_return_code(h_ret);
+		ehca_err(srq->device, "hipz_h_query_qp() failed "
+			 "ehca_qp=%p qp_num=%x h_ret=%lx",
+			 my_qp, my_qp->real_qp_num, h_ret);
+		goto query_srq_exit1;
+	}
+
+	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;
+	srq_attr->srq_limit = EHCA_BMASK_GET(
+		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit);
+
+	if (ehca_debug_level)
+		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num);
+
+query_srq_exit1:
+	ehca_free_fw_ctrlblock(qpcb);
+
+	return ret;
+}
+
+int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
+			struct ib_uobject *uobject)
+{
+	struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
 	struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
 					     ib_pd);
 	u32 cur_pid = current->tgid;
-	u32 qp_num = ibqp->qp_num;
+	u32 qp_num = my_qp->real_qp_num;
 	int ret;
 	u64 h_ret;
 	u8 port_num;
 	enum ib_qp_type qp_type;
 	unsigned long flags;
 
-	if (ibqp->uobject) {
+	if (uobject) {
 		if (my_qp->mm_count_galpa ||
 		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {
-			ehca_err(ibqp->device, "Resources still referenced in "
-				 "user space qp_num=%x", ibqp->qp_num);
+			ehca_err(dev, "Resources still referenced in "
+				 "user space qp_num=%x", qp_num);
 			return -EINVAL;
 		}
 		if (my_pd->ownpid != cur_pid) {
-			ehca_err(ibqp->device, "Invalid caller pid=%x ownpid=%x",
+			ehca_err(dev, "Invalid caller pid=%x ownpid=%x",
 				 cur_pid, my_pd->ownpid);
 			return -EINVAL;
 		}
 	}
 
 	if (my_qp->send_cq) {
-		ret = ehca_cq_unassign_qp(my_qp->send_cq,
-					  my_qp->real_qp_num);
+		ret = ehca_cq_unassign_qp(my_qp->send_cq, qp_num);
 		if (ret) {
-			ehca_err(ibqp->device, "Couldn't unassign qp from "
+			ehca_err(dev, "Couldn't unassign qp from "
 				 "send_cq ret=%x qp_num=%x cq_num=%x", ret,
-				 my_qp->ib_qp.qp_num, my_qp->send_cq->cq_number);
+				 qp_num, my_qp->send_cq->cq_number);
 			return ret;
 		}
 	}
@@ -1387,7 +1668,7 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 
 	h_ret = hipz_h_destroy_qp(shca->ipz_hca_handle, my_qp);
 	if (h_ret != H_SUCCESS) {
-		ehca_err(ibqp->device, "hipz_h_destroy_qp() failed rc=%lx "
+		ehca_err(dev, "hipz_h_destroy_qp() failed rc=%lx "
 			 "ehca_qp=%p qp_num=%x", h_ret, my_qp, qp_num);
 		return ehca2ib_return_code(h_ret);
 	}
@@ -1398,7 +1679,7 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 	/* no support for IB_QPT_SMI yet */
 	if (qp_type == IB_QPT_GSI) {
 		struct ib_event event;
-		ehca_info(ibqp->device, "device %s: port %x is inactive.",
+		ehca_info(dev, "device %s: port %x is inactive.",
 			  shca->ib_device.name, port_num);
 		event.device = &shca->ib_device;
 		event.event = IB_EVENT_PORT_ERR;
@@ -1407,12 +1688,28 @@ int ehca_destroy_qp(struct ib_qp *ibqp)
 		ib_dispatch_event(&event);
 	}
 
-	ipz_queue_dtor(&my_qp->ipz_rqueue);
-	ipz_queue_dtor(&my_qp->ipz_squeue);
+	if (HAS_RQ(my_qp))
+		ipz_queue_dtor(&my_qp->ipz_rqueue);
+	if (HAS_SQ(my_qp))
+		ipz_queue_dtor(&my_qp->ipz_squeue);
 	kmem_cache_free(qp_cache, my_qp);
 	return 0;
 }
 
+int ehca_destroy_qp(struct ib_qp *qp)
+{
+	return internal_destroy_qp(qp->device,
+				   container_of(qp, struct ehca_qp, ib_qp),
+				   qp->uobject);
+}
+
+int ehca_destroy_srq(struct ib_srq *srq)
+{
+	return internal_destroy_qp(srq->device,
+				   container_of(srq, struct ehca_qp, ib_srq),
+				   srq->uobject);
+}
+
 int ehca_init_qp_cache(void)
 {
 	qp_cache = kmem_cache_create("ehca_cache_qp",
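
ehca_create_srq() above walks the new SRQ through INIT, enable and RTR, and
ehca_modify_srq() accepts exactly one attribute, IB_SRQ_LIMIT. From a
consumer's side the pairing looks roughly like the sketch below (stock verbs
API; the CQ, queue sizes and limit value are placeholders):

#include <rdma/ib_verbs.h>

static int attach_qp_and_arm_limit(struct ib_pd *pd, struct ib_srq *srq,
				   struct ib_cq *cq)
{
	struct ib_qp_init_attr qp_attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.srq     = srq, /* makes internal_create_qp() take EQPT_SRQBASE */
		.qp_type = IB_QPT_RC,
		.cap     = { .max_send_wr = 64, .max_send_sge = 3 },
	};
	struct ib_srq_attr limit = { .srq_limit = 16 };
	struct ib_qp *qp;

	qp = ib_create_qp(pd, &qp_attr);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	/* Ask for an IB_EVENT_SRQ_LIMIT_REACHED once the SRQ runs low. */
	return ib_modify_srq(srq, &limit, IB_SRQ_LIMIT);
}
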
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 56c4527c884f..b5664fa34de3 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -3,8 +3,9 @@
  *
  *  post_send/recv, poll_cq, req_notify
  *
- *  Authors: Waleri Fomin <fomin@de.ibm.com>
- *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *  Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Waleri Fomin <fomin@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *           Reinhard Ernst <rernst@de.ibm.com>
  *
  *  Copyright (c) 2005 IBM Corporation
@@ -413,17 +414,23 @@ post_send_exit0:
 	return ret;
 }
 
-int ehca_post_recv(struct ib_qp *qp,
-		   struct ib_recv_wr *recv_wr,
-		   struct ib_recv_wr **bad_recv_wr)
+static int internal_post_recv(struct ehca_qp *my_qp,
+			      struct ib_device *dev,
+			      struct ib_recv_wr *recv_wr,
+			      struct ib_recv_wr **bad_recv_wr)
 {
-	struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
 	struct ib_recv_wr *cur_recv_wr;
 	struct ehca_wqe *wqe_p;
 	int wqe_cnt = 0;
 	int ret = 0;
 	unsigned long spl_flags;
 
+	if (unlikely(!HAS_RQ(my_qp))) {
+		ehca_err(dev, "QP has no RQ ehca_qp=%p qp_num=%x ext_type=%d",
+			 my_qp, my_qp->real_qp_num, my_qp->ext_type);
+		return -ENODEV;
+	}
+
 	/* LOCK the QUEUE */
 	spin_lock_irqsave(&my_qp->spinlock_r, spl_flags);
 
@@ -439,8 +446,8 @@ int ehca_post_recv(struct ib_qp *qp,
 			*bad_recv_wr = cur_recv_wr;
 			if (wqe_cnt == 0) {
 				ret = -ENOMEM;
-				ehca_err(qp->device, "Too many posted WQEs "
-					 "qp_num=%x", qp->qp_num);
+				ehca_err(dev, "Too many posted WQEs "
+					 "qp_num=%x", my_qp->real_qp_num);
 			}
 			goto post_recv_exit0;
 		}
@@ -455,14 +462,14 @@ int ehca_post_recv(struct ib_qp *qp,
 			*bad_recv_wr = cur_recv_wr;
 			if (wqe_cnt == 0) {
 				ret = -EINVAL;
-				ehca_err(qp->device, "Could not write WQE "
-					 "qp_num=%x", qp->qp_num);
+				ehca_err(dev, "Could not write WQE "
+					 "qp_num=%x", my_qp->real_qp_num);
 			}
 			goto post_recv_exit0;
 		}
 		wqe_cnt++;
-		ehca_gen_dbg("ehca_qp=%p qp_num=%x wqe_cnt=%d",
-			     my_qp, qp->qp_num, wqe_cnt);
+		ehca_dbg(dev, "ehca_qp=%p qp_num=%x wqe_cnt=%d",
+			 my_qp, my_qp->real_qp_num, wqe_cnt);
 	} /* eof for cur_recv_wr */
 
 post_recv_exit0:
@@ -472,6 +479,22 @@ post_recv_exit0:
 	return ret;
 }
 
+int ehca_post_recv(struct ib_qp *qp,
+		   struct ib_recv_wr *recv_wr,
+		   struct ib_recv_wr **bad_recv_wr)
+{
+	return internal_post_recv(container_of(qp, struct ehca_qp, ib_qp),
+				  qp->device, recv_wr, bad_recv_wr);
+}
+
+int ehca_post_srq_recv(struct ib_srq *srq,
+		       struct ib_recv_wr *recv_wr,
+		       struct ib_recv_wr **bad_recv_wr)
+{
+	return internal_post_recv(container_of(srq, struct ehca_qp, ib_srq),
+				  srq->device, recv_wr, bad_recv_wr);
+}
+
 /*
  * ib_wc_opcode table converts ehca wc opcode to ib
  * Since we use zero to indicate invalid opcode, the actual ib opcode must
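
internal_post_recv() above now serves both entry points and rejects QPs that
have no receive queue of their own. Refilling the SRQ from a ULP goes through
ib_post_srq_recv(); a sketch, where the buffer address, length and lkey are
placeholders:

#include <rdma/ib_verbs.h>

static int srq_replenish(struct ib_srq *srq, u64 buf, u32 len, u32 lkey)
{
	struct ib_sge sge = { .addr = buf, .length = len, .lkey = lkey };
	struct ib_recv_wr wr = { .wr_id = buf, .sg_list = &sge, .num_sge = 1 };
	struct ib_recv_wr *bad_wr;

	/* Lands in ehca_post_srq_recv() -> internal_post_recv() above. */
	return ib_post_srq_recv(srq, &wr, &bad_wr);
}
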
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 73db920b6945..d8fe37d56f1a 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -257,6 +257,7 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct ehca_cq *cq;
 	struct ehca_qp *qp;
 	struct ehca_pd *pd;
+	struct ib_uobject *uobject;
 
 	switch (q_type) {
 	case 1: /* CQ */
@@ -304,7 +305,8 @@ int ehca_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 			return -ENOMEM;
 		}
 
-		if (!qp->ib_qp.uobject || qp->ib_qp.uobject->context != context)
+		uobject = IS_SRQ(qp) ? qp->ib_srq.uobject : qp->ib_qp.uobject;
+		if (!uobject || uobject->context != context)
 			return -EINVAL;
 
 		ret = ehca_mmap_qp(vma, qp, rsrc_type);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 7efc4a2ad2b9..b0783773f1c8 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -5,6 +5,7 @@
  *
  *  Authors: Christoph Raisch <raisch@de.ibm.com>
  *           Hoang-Nam Nguyen <hnguyen@de.ibm.com>
+ *           Joachim Fenkes <fenkes@de.ibm.com>
  *           Gerd Bayer <gerd.bayer@de.ibm.com>
  *           Waleri Fomin <fomin@de.ibm.com>
  *
@@ -62,6 +63,12 @@
 #define H_ALL_RES_QP_MAX_SEND_SGE       EHCA_BMASK_IBM(32, 39)
 #define H_ALL_RES_QP_MAX_RECV_SGE       EHCA_BMASK_IBM(40, 47)
 
+#define H_ALL_RES_QP_UD_AV_LKEY         EHCA_BMASK_IBM(32, 63)
+#define H_ALL_RES_QP_SRQ_QP_TOKEN       EHCA_BMASK_IBM(0, 31)
+#define H_ALL_RES_QP_SRQ_QP_HANDLE      EHCA_BMASK_IBM(0, 64)
+#define H_ALL_RES_QP_SRQ_LIMIT          EHCA_BMASK_IBM(48, 63)
+#define H_ALL_RES_QP_SRQ_QPN            EHCA_BMASK_IBM(40, 63)
+
 #define H_ALL_RES_QP_ACT_OUTST_SEND_WR  EHCA_BMASK_IBM(16, 31)
 #define H_ALL_RES_QP_ACT_OUTST_RECV_WR  EHCA_BMASK_IBM(48, 63)
 #define H_ALL_RES_QP_ACT_SEND_SGE       EHCA_BMASK_IBM(8, 15)
@@ -150,7 +157,7 @@ static long ehca_plpar_hcall9(unsigned long opcode,
 {
 	long ret;
 	int i, sleep_msecs, lock_is_set = 0;
-	unsigned long flags;
+	unsigned long flags = 0;
 
 	ehca_gen_dbg("opcode=%lx arg1=%lx arg2=%lx arg3=%lx arg4=%lx "
 		     "arg5=%lx arg6=%lx arg7=%lx arg8=%lx arg9=%lx",
@@ -282,8 +289,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 			     struct ehca_alloc_qp_parms *parms)
 {
 	u64 ret;
-	u64 allocate_controls;
-	u64 max_r10_reg;
+	u64 allocate_controls, max_r10_reg, r11, r12;
 	u64 outs[PLPAR_HCALL9_BUFSIZE];
 
 	allocate_controls =
@@ -309,6 +315,13 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 		| EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
 				 parms->max_recv_sge);
 
+	r11 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QP_TOKEN, parms->srq_token);
+
+	if (parms->ext_type == EQPT_SRQ)
+		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_LIMIT, parms->srq_limit);
+	else
+		r12 = EHCA_BMASK_SET(H_ALL_RES_QP_SRQ_QPN, parms->srq_qpn);
+
 	ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
 				adapter_handle.handle,         /* r4 */
 				allocate_controls,             /* r5 */
@@ -316,9 +329,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
 				parms->recv_cq_handle.handle,
 				parms->eq_handle.handle,
 				((u64)parms->token << 32) | parms->pd.value,
-				max_r10_reg,                   /* r10 */
-				parms->ud_av_l_key_ctl,        /* r11 */
-				0);
+				max_r10_reg, r11, r12);
 
 	parms->qp_handle.handle = outs[0];
 	parms->real_qp_num = (u32)outs[1];
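
The new H_ALL_RES_QP_SRQ_* masks pack SRQ parameters into the r11/r12 hcall
registers using IBM bit numbering, in which bit 0 is the most significant
bit of the 64-bit register. An illustrative helper with equivalent semantics,
assuming that numbering (not ehca's actual EHCA_BMASK macro definitions):

#include <stdint.h>

static uint64_t bmask_ibm_set(int from, int to, uint64_t value)
{
	int width = to - from + 1;  /* field width in bits */
	int shift = 63 - to;        /* field LSB, counted from bit 63 */
	uint64_t mask = (width >= 64) ? ~0ULL : ((1ULL << width) - 1);

	return (value & mask) << shift;
}

/*
 * E.g. H_ALL_RES_QP_SRQ_QP_TOKEN covers bits 0..31, so
 *   r11 = bmask_ibm_set(0, 31, srq_token);
 * places the token in the high 32 bits, matching how the hcall above
 * packs ((u64)parms->token << 32) into its register argument.
 */
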
diff --git a/drivers/infiniband/hw/ehca/hipz_hw.h b/drivers/infiniband/hw/ehca/hipz_hw.h
index 9116fc943fee..dad6dea5636b 100644
--- a/drivers/infiniband/hw/ehca/hipz_hw.h
+++ b/drivers/infiniband/hw/ehca/hipz_hw.h
@@ -163,6 +163,7 @@ struct hipz_qptemm {
 
 #define QPX_SQADDER                     EHCA_BMASK_IBM(48,63)
 #define QPX_RQADDER                     EHCA_BMASK_IBM(48,63)
+#define QPX_AAELOG_RESET_SRQ_LIMIT      EHCA_BMASK_IBM(3,3)
 
 #define QPTEMM_OFFSET(x) offsetof(struct hipz_qptemm,x)
 