aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/ehca
diff options
context:
space:
mode:
author: Joachim Fenkes <fenkes@de.ibm.com> 2007-07-09 09:23:15 -0400
committer: Roland Dreier <rolandd@cisco.com> 2007-07-09 23:12:27 -0400
commit: 9a79fc0a1b815cbd05a8e37ea838acfccb7235cc (patch)
tree: ed94fc291a5017f94e2b9d57f6909a84e1eb40db /drivers/infiniband/hw/ehca
parent: 91f13aa3fc22e357b494c5b8270e94543870928d (diff)
IB/ehca: QP code restructuring in preparation for SRQ
- Replace init_qp_queues() by a shorter init_qp_queue(), eliminating duplicate code.
- hipz_h_alloc_resource_qp() doesn't need a pointer to struct ehca_qp any longer. All input and output data is transferred through the parms parameter.
- Change the interface to also support SRQ.

Signed-off-by: Joachim Fenkes <fenkes@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h46
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c254
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.c35
-rw-r--r--drivers/infiniband/hw/ehca/hcp_if.h1
4 files changed, 166 insertions, 170 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 35d948f2502c..6e75db68996e 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -322,14 +322,49 @@ struct ehca_alloc_cq_parms {
322 struct ipz_eq_handle eq_handle; 322 struct ipz_eq_handle eq_handle;
323}; 323};
324 324
325enum ehca_service_type {
326 ST_RC = 0,
327 ST_UC = 1,
328 ST_RD = 2,
329 ST_UD = 3,
330};
331
332enum ehca_ext_qp_type {
333 EQPT_NORMAL = 0,
334 EQPT_LLQP = 1,
335 EQPT_SRQBASE = 2,
336 EQPT_SRQ = 3,
337};
338
339enum ehca_ll_comp_flags {
340 LLQP_SEND_COMP = 0x20,
341 LLQP_RECV_COMP = 0x40,
342 LLQP_COMP_MASK = 0x60,
343};
344
325struct ehca_alloc_qp_parms { 345struct ehca_alloc_qp_parms {
326 int servicetype; 346/* input parameters */
347 enum ehca_service_type servicetype;
327 int sigtype; 348 int sigtype;
328 int daqp_ctrl; 349 enum ehca_ext_qp_type ext_type;
329 int max_send_sge; 350 enum ehca_ll_comp_flags ll_comp_flags;
330 int max_recv_sge; 351
352 int max_send_wr, max_recv_wr;
353 int max_send_sge, max_recv_sge;
331 int ud_av_l_key_ctl; 354 int ud_av_l_key_ctl;
332 355
356 u32 token;
357 struct ipz_eq_handle eq_handle;
358 struct ipz_pd pd;
359 struct ipz_cq_handle send_cq_handle, recv_cq_handle;
360
361 u32 srq_qpn, srq_token, srq_limit;
362
363/* output parameters */
364 u32 real_qp_num;
365 struct ipz_qp_handle qp_handle;
366 struct h_galpas galpas;
367
333 u16 act_nr_send_wqes; 368 u16 act_nr_send_wqes;
334 u16 act_nr_recv_wqes; 369 u16 act_nr_recv_wqes;
335 u8 act_nr_recv_sges; 370 u8 act_nr_recv_sges;
@@ -337,9 +372,6 @@ struct ehca_alloc_qp_parms {
337 372
338 u32 nr_rq_pages; 373 u32 nr_rq_pages;
339 u32 nr_sq_pages; 374 u32 nr_sq_pages;
340
341 struct ipz_eq_handle ipz_eq_handle;
342 struct ipz_pd pd;
343}; 375};
344 376
345int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp); 377int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp);
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index b5bc787c77b6..513471a7bffa 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -234,13 +234,6 @@ static inline enum ib_qp_statetrans get_modqp_statetrans(int ib_fromstate,
234 return index; 234 return index;
235} 235}
236 236
237enum ehca_service_type {
238 ST_RC = 0,
239 ST_UC = 1,
240 ST_RD = 2,
241 ST_UD = 3
242};
243
244/* 237/*
245 * ibqptype2servicetype returns hcp service type corresponding to given 238 * ibqptype2servicetype returns hcp service type corresponding to given
246 * ib qp type used by create_qp() 239 * ib qp type used by create_qp()
@@ -268,15 +261,16 @@ static inline int ibqptype2servicetype(enum ib_qp_type ibqptype)
268} 261}
269 262
270/* 263/*
271 * init_qp_queues initializes/constructs r/squeue and registers queue pages. 264 * init_qp_queue initializes/constructs r/squeue and registers queue pages.
272 */ 265 */
273static inline int init_qp_queues(struct ehca_shca *shca, 266static inline int init_qp_queue(struct ehca_shca *shca,
274 struct ehca_qp *my_qp, 267 struct ehca_qp *my_qp,
275 int nr_sq_pages, 268 struct ipz_queue *queue,
276 int nr_rq_pages, 269 int q_type,
277 int swqe_size, 270 u64 expected_hret,
278 int rwqe_size, 271 int nr_q_pages,
279 int nr_send_sges, int nr_receive_sges) 272 int wqe_size,
273 int nr_sges)
280{ 274{
281 int ret, cnt, ipz_rc; 275 int ret, cnt, ipz_rc;
282 void *vpage; 276 void *vpage;
@@ -284,104 +278,63 @@ static inline int init_qp_queues(struct ehca_shca *shca,
284 struct ib_device *ib_dev = &shca->ib_device; 278 struct ib_device *ib_dev = &shca->ib_device;
285 struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle; 279 struct ipz_adapter_handle ipz_hca_handle = shca->ipz_hca_handle;
286 280
287 ipz_rc = ipz_queue_ctor(&my_qp->ipz_squeue, 281 if (!nr_q_pages)
288 nr_sq_pages, 282 return 0;
289 EHCA_PAGESIZE, swqe_size, nr_send_sges); 283
284 ipz_rc = ipz_queue_ctor(queue, nr_q_pages, EHCA_PAGESIZE,
285 wqe_size, nr_sges);
290 if (!ipz_rc) { 286 if (!ipz_rc) {
291 ehca_err(ib_dev,"Cannot allocate page for squeue. ipz_rc=%x", 287 ehca_err(ib_dev, "Cannot allocate page for queue. ipz_rc=%x",
292 ipz_rc); 288 ipz_rc);
293 return -EBUSY; 289 return -EBUSY;
294 } 290 }
295 291
296 ipz_rc = ipz_queue_ctor(&my_qp->ipz_rqueue, 292 /* register queue pages */
297 nr_rq_pages, 293 for (cnt = 0; cnt < nr_q_pages; cnt++) {
298 EHCA_PAGESIZE, rwqe_size, nr_receive_sges); 294 vpage = ipz_qpageit_get_inc(queue);
299 if (!ipz_rc) {
300 ehca_err(ib_dev, "Cannot allocate page for rqueue. ipz_rc=%x",
301 ipz_rc);
302 ret = -EBUSY;
303 goto init_qp_queues0;
304 }
305 /* register SQ pages */
306 for (cnt = 0; cnt < nr_sq_pages; cnt++) {
307 vpage = ipz_qpageit_get_inc(&my_qp->ipz_squeue);
308 if (!vpage) { 295 if (!vpage) {
309 ehca_err(ib_dev, "SQ ipz_qpageit_get_inc() " 296 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
310 "failed p_vpage= %p", vpage); 297 "failed p_vpage= %p", vpage);
311 ret = -EINVAL; 298 ret = -EINVAL;
312 goto init_qp_queues1; 299 goto init_qp_queue1;
313 } 300 }
314 rpage = virt_to_abs(vpage); 301 rpage = virt_to_abs(vpage);
315 302
316 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle, 303 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
317 my_qp->ipz_qp_handle, 304 my_qp->ipz_qp_handle,
318 &my_qp->pf, 0, 0, 305 NULL, 0, q_type,
319 rpage, 1, 306 rpage, 1,
320 my_qp->galpas.kernel); 307 my_qp->galpas.kernel);
321 if (h_ret < H_SUCCESS) { 308 if (cnt == (nr_q_pages - 1)) { /* last page! */
322 ehca_err(ib_dev, "SQ hipz_qp_register_rpage()" 309 if (h_ret != expected_hret) {
323 " failed rc=%lx", h_ret); 310 ehca_err(ib_dev, "hipz_qp_register_rpage() "
324 ret = ehca2ib_return_code(h_ret);
325 goto init_qp_queues1;
326 }
327 }
328
329 ipz_qeit_reset(&my_qp->ipz_squeue);
330
331 /* register RQ pages */
332 for (cnt = 0; cnt < nr_rq_pages; cnt++) {
333 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
334 if (!vpage) {
335 ehca_err(ib_dev, "RQ ipz_qpageit_get_inc() "
336 "failed p_vpage = %p", vpage);
337 ret = -EINVAL;
338 goto init_qp_queues1;
339 }
340
341 rpage = virt_to_abs(vpage);
342
343 h_ret = hipz_h_register_rpage_qp(ipz_hca_handle,
344 my_qp->ipz_qp_handle,
345 &my_qp->pf, 0, 1,
346 rpage, 1,my_qp->galpas.kernel);
347 if (h_ret < H_SUCCESS) {
348 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() failed "
349 "rc=%lx", h_ret);
350 ret = ehca2ib_return_code(h_ret);
351 goto init_qp_queues1;
352 }
353 if (cnt == (nr_rq_pages - 1)) { /* last page! */
354 if (h_ret != H_SUCCESS) {
355 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() "
356 "h_ret= %lx ", h_ret); 311 "h_ret= %lx ", h_ret);
357 ret = ehca2ib_return_code(h_ret); 312 ret = ehca2ib_return_code(h_ret);
358 goto init_qp_queues1; 313 goto init_qp_queue1;
359 } 314 }
360 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue); 315 vpage = ipz_qpageit_get_inc(&my_qp->ipz_rqueue);
361 if (vpage) { 316 if (vpage) {
362 ehca_err(ib_dev, "ipz_qpageit_get_inc() " 317 ehca_err(ib_dev, "ipz_qpageit_get_inc() "
363 "should not succeed vpage=%p", vpage); 318 "should not succeed vpage=%p", vpage);
364 ret = -EINVAL; 319 ret = -EINVAL;
365 goto init_qp_queues1; 320 goto init_qp_queue1;
366 } 321 }
367 } else { 322 } else {
368 if (h_ret != H_PAGE_REGISTERED) { 323 if (h_ret != H_PAGE_REGISTERED) {
369 ehca_err(ib_dev, "RQ hipz_qp_register_rpage() " 324 ehca_err(ib_dev, "hipz_qp_register_rpage() "
370 "h_ret= %lx ", h_ret); 325 "h_ret= %lx ", h_ret);
371 ret = ehca2ib_return_code(h_ret); 326 ret = ehca2ib_return_code(h_ret);
372 goto init_qp_queues1; 327 goto init_qp_queue1;
373 } 328 }
374 } 329 }
375 } 330 }
376 331
377 ipz_qeit_reset(&my_qp->ipz_rqueue); 332 ipz_qeit_reset(queue);
378 333
379 return 0; 334 return 0;
380 335
381init_qp_queues1: 336init_qp_queue1:
382 ipz_queue_dtor(&my_qp->ipz_rqueue); 337 ipz_queue_dtor(queue);
383init_qp_queues0:
384 ipz_queue_dtor(&my_qp->ipz_squeue);
385 return ret; 338 return ret;
386} 339}
387 340
@@ -397,14 +350,17 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
397 ib_device); 350 ib_device);
398 struct ib_ucontext *context = NULL; 351 struct ib_ucontext *context = NULL;
399 u64 h_ret; 352 u64 h_ret;
400 int max_send_sge, max_recv_sge, ret; 353 int is_llqp = 0, has_srq = 0;
354 int qp_type, max_send_sge, max_recv_sge, ret;
401 355
402 /* h_call's out parameters */ 356 /* h_call's out parameters */
403 struct ehca_alloc_qp_parms parms; 357 struct ehca_alloc_qp_parms parms;
404 u32 swqe_size = 0, rwqe_size = 0; 358 u32 swqe_size = 0, rwqe_size = 0;
405 u8 daqp_completion, isdaqp;
406 unsigned long flags; 359 unsigned long flags;
407 360
361 memset(&parms, 0, sizeof(parms));
362 qp_type = init_attr->qp_type;
363
408 if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR && 364 if (init_attr->sq_sig_type != IB_SIGNAL_REQ_WR &&
409 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) { 365 init_attr->sq_sig_type != IB_SIGNAL_ALL_WR) {
410 ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed", 366 ehca_err(pd->device, "init_attr->sg_sig_type=%x not allowed",
@@ -412,38 +368,47 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
412 return ERR_PTR(-EINVAL); 368 return ERR_PTR(-EINVAL);
413 } 369 }
414 370
415 /* save daqp completion bits */ 371 /* save LLQP info */
416 daqp_completion = init_attr->qp_type & 0x60; 372 if (qp_type & 0x80) {
417 /* save daqp bit */ 373 is_llqp = 1;
418 isdaqp = (init_attr->qp_type & 0x80) ? 1 : 0; 374 parms.ext_type = EQPT_LLQP;
419 init_attr->qp_type = init_attr->qp_type & 0x1F; 375 parms.ll_comp_flags = qp_type & LLQP_COMP_MASK;
376 }
377 qp_type &= 0x1F;
378
379 /* check for SRQ */
380 has_srq = !!(init_attr->srq);
381 if (is_llqp && has_srq) {
382 ehca_err(pd->device, "LLQPs can't have an SRQ");
383 return ERR_PTR(-EINVAL);
384 }
420 385
421 if (init_attr->qp_type != IB_QPT_UD && 386 /* check QP type */
422 init_attr->qp_type != IB_QPT_SMI && 387 if (qp_type != IB_QPT_UD &&
423 init_attr->qp_type != IB_QPT_GSI && 388 qp_type != IB_QPT_UC &&
424 init_attr->qp_type != IB_QPT_UC && 389 qp_type != IB_QPT_RC &&
425 init_attr->qp_type != IB_QPT_RC) { 390 qp_type != IB_QPT_SMI &&
426 ehca_err(pd->device, "wrong QP Type=%x", init_attr->qp_type); 391 qp_type != IB_QPT_GSI) {
392 ehca_err(pd->device, "wrong QP Type=%x", qp_type);
427 return ERR_PTR(-EINVAL); 393 return ERR_PTR(-EINVAL);
428 } 394 }
429 if ((init_attr->qp_type != IB_QPT_RC && init_attr->qp_type != IB_QPT_UD) 395
430 && isdaqp) { 396 if (is_llqp && (qp_type != IB_QPT_RC && qp_type != IB_QPT_UD)) {
431 ehca_err(pd->device, "unsupported LL QP Type=%x", 397 ehca_err(pd->device, "unsupported LL QP Type=%x", qp_type);
432 init_attr->qp_type);
433 return ERR_PTR(-EINVAL); 398 return ERR_PTR(-EINVAL);
434 } else if (init_attr->qp_type == IB_QPT_RC && isdaqp && 399 } else if (is_llqp && qp_type == IB_QPT_RC &&
435 (init_attr->cap.max_send_wr > 255 || 400 (init_attr->cap.max_send_wr > 255 ||
436 init_attr->cap.max_recv_wr > 255 )) { 401 init_attr->cap.max_recv_wr > 255 )) {
437 ehca_err(pd->device, "Invalid Number of max_sq_wr =%x " 402 ehca_err(pd->device, "Invalid Number of max_sq_wr=%x "
438 "or max_rq_wr=%x for QP Type=%x", 403 "or max_rq_wr=%x for RC LLQP",
439 init_attr->cap.max_send_wr, 404 init_attr->cap.max_send_wr,
440 init_attr->cap.max_recv_wr,init_attr->qp_type); 405 init_attr->cap.max_recv_wr);
441 return ERR_PTR(-EINVAL); 406 return ERR_PTR(-EINVAL);
442 } else if (init_attr->qp_type == IB_QPT_UD && isdaqp && 407 } else if (is_llqp && qp_type == IB_QPT_UD &&
443 init_attr->cap.max_send_wr > 255) { 408 init_attr->cap.max_send_wr > 255) {
444 ehca_err(pd->device, 409 ehca_err(pd->device,
445 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x", 410 "Invalid Number of max_send_wr=%x for UD QP_TYPE=%x",
446 init_attr->cap.max_send_wr, init_attr->qp_type); 411 init_attr->cap.max_send_wr, qp_type);
447 return ERR_PTR(-EINVAL); 412 return ERR_PTR(-EINVAL);
448 } 413 }
449 414
@@ -456,7 +421,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
456 return ERR_PTR(-ENOMEM); 421 return ERR_PTR(-ENOMEM);
457 } 422 }
458 423
459 memset (&parms, 0, sizeof(struct ehca_alloc_qp_parms));
460 spin_lock_init(&my_qp->spinlock_s); 424 spin_lock_init(&my_qp->spinlock_s);
461 spin_lock_init(&my_qp->spinlock_r); 425 spin_lock_init(&my_qp->spinlock_r);
462 426
@@ -465,8 +429,6 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
465 my_qp->send_cq = 429 my_qp->send_cq =
466 container_of(init_attr->send_cq, struct ehca_cq, ib_cq); 430 container_of(init_attr->send_cq, struct ehca_cq, ib_cq);
467 431
468 my_qp->init_attr = *init_attr;
469
470 do { 432 do {
471 if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) { 433 if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) {
472 ret = -ENOMEM; 434 ret = -ENOMEM;
@@ -486,10 +448,10 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
486 goto create_qp_exit0; 448 goto create_qp_exit0;
487 } 449 }
488 450
489 parms.servicetype = ibqptype2servicetype(init_attr->qp_type); 451 parms.servicetype = ibqptype2servicetype(qp_type);
490 if (parms.servicetype < 0) { 452 if (parms.servicetype < 0) {
491 ret = -EINVAL; 453 ret = -EINVAL;
492 ehca_err(pd->device, "Invalid qp_type=%x", init_attr->qp_type); 454 ehca_err(pd->device, "Invalid qp_type=%x", qp_type);
493 goto create_qp_exit0; 455 goto create_qp_exit0;
494 } 456 }
495 457
@@ -501,21 +463,23 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
501 /* UD_AV CIRCUMVENTION */ 463 /* UD_AV CIRCUMVENTION */
502 max_send_sge = init_attr->cap.max_send_sge; 464 max_send_sge = init_attr->cap.max_send_sge;
503 max_recv_sge = init_attr->cap.max_recv_sge; 465 max_recv_sge = init_attr->cap.max_recv_sge;
504 if (IB_QPT_UD == init_attr->qp_type || 466 if (parms.servicetype == ST_UD) {
505 IB_QPT_GSI == init_attr->qp_type ||
506 IB_QPT_SMI == init_attr->qp_type) {
507 max_send_sge += 2; 467 max_send_sge += 2;
508 max_recv_sge += 2; 468 max_recv_sge += 2;
509 } 469 }
510 470
511 parms.ipz_eq_handle = shca->eq.ipz_eq_handle; 471 parms.token = my_qp->token;
512 parms.daqp_ctrl = isdaqp | daqp_completion; 472 parms.eq_handle = shca->eq.ipz_eq_handle;
513 parms.pd = my_pd->fw_pd; 473 parms.pd = my_pd->fw_pd;
514 parms.max_recv_sge = max_recv_sge; 474 parms.send_cq_handle = my_qp->send_cq->ipz_cq_handle;
515 parms.max_send_sge = max_send_sge; 475 parms.recv_cq_handle = my_qp->recv_cq->ipz_cq_handle;
516 476
517 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, my_qp, &parms); 477 parms.max_send_wr = init_attr->cap.max_send_wr;
478 parms.max_recv_wr = init_attr->cap.max_recv_wr;
479 parms.max_send_sge = max_send_sge;
480 parms.max_recv_sge = max_recv_sge;
518 481
482 h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms);
519 if (h_ret != H_SUCCESS) { 483 if (h_ret != H_SUCCESS) {
520 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx", 484 ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lx",
521 h_ret); 485 h_ret);
@@ -523,16 +487,18 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
523 goto create_qp_exit1; 487 goto create_qp_exit1;
524 } 488 }
525 489
526 my_qp->ib_qp.qp_num = my_qp->real_qp_num; 490 my_qp->ib_qp.qp_num = my_qp->real_qp_num = parms.real_qp_num;
491 my_qp->ipz_qp_handle = parms.qp_handle;
492 my_qp->galpas = parms.galpas;
527 493
528 switch (init_attr->qp_type) { 494 switch (qp_type) {
529 case IB_QPT_RC: 495 case IB_QPT_RC:
530 if (isdaqp == 0) { 496 if (!is_llqp) {
531 swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[ 497 swqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
532 (parms.act_nr_send_sges)]); 498 (parms.act_nr_send_sges)]);
533 rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[ 499 rwqe_size = offsetof(struct ehca_wqe, u.nud.sg_list[
534 (parms.act_nr_recv_sges)]); 500 (parms.act_nr_recv_sges)]);
535 } else { /* for daqp we need to use msg size, not wqe size */ 501 } else { /* for LLQP we need to use msg size, not wqe size */
536 swqe_size = da_rc_msg_size[max_send_sge]; 502 swqe_size = da_rc_msg_size[max_send_sge];
537 rwqe_size = da_rc_msg_size[max_recv_sge]; 503 rwqe_size = da_rc_msg_size[max_recv_sge];
538 parms.act_nr_send_sges = 1; 504 parms.act_nr_send_sges = 1;
@@ -552,7 +518,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
552 /* UD circumvention */ 518 /* UD circumvention */
553 parms.act_nr_recv_sges -= 2; 519 parms.act_nr_recv_sges -= 2;
554 parms.act_nr_send_sges -= 2; 520 parms.act_nr_send_sges -= 2;
555 if (isdaqp) { 521 if (is_llqp) {
556 swqe_size = da_ud_sq_msg_size[max_send_sge]; 522 swqe_size = da_ud_sq_msg_size[max_send_sge];
557 rwqe_size = da_rc_msg_size[max_recv_sge]; 523 rwqe_size = da_rc_msg_size[max_recv_sge];
558 parms.act_nr_send_sges = 1; 524 parms.act_nr_send_sges = 1;
@@ -564,14 +530,12 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
564 u.ud_av.sg_list[parms.act_nr_recv_sges]); 530 u.ud_av.sg_list[parms.act_nr_recv_sges]);
565 } 531 }
566 532
567 if (IB_QPT_GSI == init_attr->qp_type || 533 if (IB_QPT_GSI == qp_type || IB_QPT_SMI == qp_type) {
568 IB_QPT_SMI == init_attr->qp_type) {
569 parms.act_nr_send_wqes = init_attr->cap.max_send_wr; 534 parms.act_nr_send_wqes = init_attr->cap.max_send_wr;
570 parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr; 535 parms.act_nr_recv_wqes = init_attr->cap.max_recv_wr;
571 parms.act_nr_send_sges = init_attr->cap.max_send_sge; 536 parms.act_nr_send_sges = init_attr->cap.max_send_sge;
572 parms.act_nr_recv_sges = init_attr->cap.max_recv_sge; 537 parms.act_nr_recv_sges = init_attr->cap.max_recv_sge;
573 my_qp->ib_qp.qp_num = 538 my_qp->ib_qp.qp_num = (qp_type == IB_QPT_SMI) ? 0 : 1;
574 (init_attr->qp_type == IB_QPT_SMI) ? 0 : 1;
575 } 539 }
576 540
577 break; 541 break;
@@ -580,26 +544,33 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
580 break; 544 break;
581 } 545 }
582 546
583 /* initializes r/squeue and registers queue pages */ 547 /* initialize r/squeue and register queue pages */
584 ret = init_qp_queues(shca, my_qp, 548 ret = init_qp_queue(shca, my_qp, &my_qp->ipz_squeue, 0,
585 parms.nr_sq_pages, parms.nr_rq_pages, 549 has_srq ? H_SUCCESS : H_PAGE_REGISTERED,
586 swqe_size, rwqe_size, 550 parms.nr_sq_pages, swqe_size,
587 parms.act_nr_send_sges, parms.act_nr_recv_sges); 551 parms.act_nr_send_sges);
588 if (ret) { 552 if (ret) {
589 ehca_err(pd->device, 553 ehca_err(pd->device,
590 "Couldn't initialize r/squeue and pages ret=%x", ret); 554 "Couldn't initialize squeue and pages ret=%x", ret);
591 goto create_qp_exit2; 555 goto create_qp_exit2;
592 } 556 }
593 557
558 ret = init_qp_queue(shca, my_qp, &my_qp->ipz_rqueue, 1, H_SUCCESS,
559 parms.nr_rq_pages, rwqe_size,
560 parms.act_nr_recv_sges);
561 if (ret) {
562 ehca_err(pd->device,
563 "Couldn't initialize rqueue and pages ret=%x", ret);
564 goto create_qp_exit3;
565 }
566
594 my_qp->ib_qp.pd = &my_pd->ib_pd; 567 my_qp->ib_qp.pd = &my_pd->ib_pd;
595 my_qp->ib_qp.device = my_pd->ib_pd.device; 568 my_qp->ib_qp.device = my_pd->ib_pd.device;
596 569
597 my_qp->ib_qp.recv_cq = init_attr->recv_cq; 570 my_qp->ib_qp.recv_cq = init_attr->recv_cq;
598 my_qp->ib_qp.send_cq = init_attr->send_cq; 571 my_qp->ib_qp.send_cq = init_attr->send_cq;
599 572
600 my_qp->ib_qp.qp_type = init_attr->qp_type; 573 my_qp->ib_qp.qp_type = my_qp->qp_type = qp_type;
601
602 my_qp->qp_type = init_attr->qp_type;
603 my_qp->ib_qp.srq = init_attr->srq; 574 my_qp->ib_qp.srq = init_attr->srq;
604 575
605 my_qp->ib_qp.qp_context = init_attr->qp_context; 576 my_qp->ib_qp.qp_context = init_attr->qp_context;
@@ -610,15 +581,16 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
610 init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes; 581 init_attr->cap.max_recv_wr = parms.act_nr_recv_wqes;
611 init_attr->cap.max_send_sge = parms.act_nr_send_sges; 582 init_attr->cap.max_send_sge = parms.act_nr_send_sges;
612 init_attr->cap.max_send_wr = parms.act_nr_send_wqes; 583 init_attr->cap.max_send_wr = parms.act_nr_send_wqes;
584 my_qp->init_attr = *init_attr;
613 585
614 /* NOTE: define_apq0() not supported yet */ 586 /* NOTE: define_apq0() not supported yet */
615 if (init_attr->qp_type == IB_QPT_GSI) { 587 if (qp_type == IB_QPT_GSI) {
616 h_ret = ehca_define_sqp(shca, my_qp, init_attr); 588 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
617 if (h_ret != H_SUCCESS) { 589 if (h_ret != H_SUCCESS) {
618 ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx", 590 ehca_err(pd->device, "ehca_define_sqp() failed rc=%lx",
619 h_ret); 591 h_ret);
620 ret = ehca2ib_return_code(h_ret); 592 ret = ehca2ib_return_code(h_ret);
621 goto create_qp_exit3; 593 goto create_qp_exit4;
622 } 594 }
623 } 595 }
624 if (init_attr->send_cq) { 596 if (init_attr->send_cq) {
@@ -628,7 +600,7 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
628 if (ret) { 600 if (ret) {
629 ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x", 601 ehca_err(pd->device, "Couldn't assign qp to send_cq ret=%x",
630 ret); 602 ret);
631 goto create_qp_exit3; 603 goto create_qp_exit4;
632 } 604 }
633 my_qp->send_cq = cq; 605 my_qp->send_cq = cq;
634 } 606 }
@@ -659,14 +631,16 @@ struct ib_qp *ehca_create_qp(struct ib_pd *pd,
659 if (ib_copy_to_udata(udata, &resp, sizeof resp)) { 631 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
660 ehca_err(pd->device, "Copy to udata failed"); 632 ehca_err(pd->device, "Copy to udata failed");
661 ret = -EINVAL; 633 ret = -EINVAL;
662 goto create_qp_exit3; 634 goto create_qp_exit4;
663 } 635 }
664 } 636 }
665 637
666 return &my_qp->ib_qp; 638 return &my_qp->ib_qp;
667 639
668create_qp_exit3: 640create_qp_exit4:
669 ipz_queue_dtor(&my_qp->ipz_rqueue); 641 ipz_queue_dtor(&my_qp->ipz_rqueue);
642
643create_qp_exit3:
670 ipz_queue_dtor(&my_qp->ipz_squeue); 644 ipz_queue_dtor(&my_qp->ipz_squeue);
671 645
672create_qp_exit2: 646create_qp_exit2:
diff --git a/drivers/infiniband/hw/ehca/hcp_if.c b/drivers/infiniband/hw/ehca/hcp_if.c
index 5766ae3a2029..7efc4a2ad2b9 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.c
+++ b/drivers/infiniband/hw/ehca/hcp_if.c
@@ -74,11 +74,6 @@
74#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48) 74#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
75#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49) 75#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
76 76
77/* direct access qp controls */
78#define DAQP_CTRL_ENABLE 0x01
79#define DAQP_CTRL_SEND_COMP 0x20
80#define DAQP_CTRL_RECV_COMP 0x40
81
82static u32 get_longbusy_msecs(int longbusy_rc) 77static u32 get_longbusy_msecs(int longbusy_rc)
83{ 78{
84 switch (longbusy_rc) { 79 switch (longbusy_rc) {
@@ -284,36 +279,31 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
284} 279}
285 280
286u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, 281u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
287 struct ehca_qp *qp,
288 struct ehca_alloc_qp_parms *parms) 282 struct ehca_alloc_qp_parms *parms)
289{ 283{
290 u64 ret; 284 u64 ret;
291 u64 allocate_controls; 285 u64 allocate_controls;
292 u64 max_r10_reg; 286 u64 max_r10_reg;
293 u64 outs[PLPAR_HCALL9_BUFSIZE]; 287 u64 outs[PLPAR_HCALL9_BUFSIZE];
294 u16 max_nr_receive_wqes = qp->init_attr.cap.max_recv_wr + 1;
295 u16 max_nr_send_wqes = qp->init_attr.cap.max_send_wr + 1;
296 int daqp_ctrl = parms->daqp_ctrl;
297 288
298 allocate_controls = 289 allocate_controls =
299 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, 290 EHCA_BMASK_SET(H_ALL_RES_QP_ENHANCED_OPS, parms->ext_type)
300 (daqp_ctrl & DAQP_CTRL_ENABLE) ? 1 : 0)
301 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0) 291 | EHCA_BMASK_SET(H_ALL_RES_QP_PTE_PIN, 0)
302 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype) 292 | EHCA_BMASK_SET(H_ALL_RES_QP_SERVICE_TYPE, parms->servicetype)
303 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype) 293 | EHCA_BMASK_SET(H_ALL_RES_QP_SIGNALING_TYPE, parms->sigtype)
304 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING, 294 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_RQ_CQE_POSTING,
305 (daqp_ctrl & DAQP_CTRL_RECV_COMP) ? 1 : 0) 295 !!(parms->ll_comp_flags & LLQP_RECV_COMP))
306 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING, 296 | EHCA_BMASK_SET(H_ALL_RES_QP_LL_SQ_CQE_POSTING,
307 (daqp_ctrl & DAQP_CTRL_SEND_COMP) ? 1 : 0) 297 !!(parms->ll_comp_flags & LLQP_SEND_COMP))
308 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL, 298 | EHCA_BMASK_SET(H_ALL_RES_QP_UD_AV_LKEY_CTRL,
309 parms->ud_av_l_key_ctl) 299 parms->ud_av_l_key_ctl)
310 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1); 300 | EHCA_BMASK_SET(H_ALL_RES_QP_RESOURCE_TYPE, 1);
311 301
312 max_r10_reg = 302 max_r10_reg =
313 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR, 303 EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_SEND_WR,
314 max_nr_send_wqes) 304 parms->max_send_wr + 1)
315 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR, 305 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_OUTST_RECV_WR,
316 max_nr_receive_wqes) 306 parms->max_recv_wr + 1)
317 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE, 307 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_SEND_SGE,
318 parms->max_send_sge) 308 parms->max_send_sge)
319 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE, 309 | EHCA_BMASK_SET(H_ALL_RES_QP_MAX_RECV_SGE,
@@ -322,15 +312,16 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
322 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs, 312 ret = ehca_plpar_hcall9(H_ALLOC_RESOURCE, outs,
323 adapter_handle.handle, /* r4 */ 313 adapter_handle.handle, /* r4 */
324 allocate_controls, /* r5 */ 314 allocate_controls, /* r5 */
325 qp->send_cq->ipz_cq_handle.handle, 315 parms->send_cq_handle.handle,
326 qp->recv_cq->ipz_cq_handle.handle, 316 parms->recv_cq_handle.handle,
327 parms->ipz_eq_handle.handle, 317 parms->eq_handle.handle,
328 ((u64)qp->token << 32) | parms->pd.value, 318 ((u64)parms->token << 32) | parms->pd.value,
329 max_r10_reg, /* r10 */ 319 max_r10_reg, /* r10 */
330 parms->ud_av_l_key_ctl, /* r11 */ 320 parms->ud_av_l_key_ctl, /* r11 */
331 0); 321 0);
332 qp->ipz_qp_handle.handle = outs[0]; 322
333 qp->real_qp_num = (u32)outs[1]; 323 parms->qp_handle.handle = outs[0];
324 parms->real_qp_num = (u32)outs[1];
334 parms->act_nr_send_wqes = 325 parms->act_nr_send_wqes =
335 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]); 326 (u16)EHCA_BMASK_GET(H_ALL_RES_QP_ACT_OUTST_SEND_WR, outs[2]);
336 parms->act_nr_recv_wqes = 327 parms->act_nr_recv_wqes =
@@ -345,7 +336,7 @@ u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
345 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]); 336 (u32)EHCA_BMASK_GET(H_ALL_RES_QP_RQUEUE_SIZE_PAGES, outs[4]);
346 337
347 if (ret == H_SUCCESS) 338 if (ret == H_SUCCESS)
348 hcp_galpas_ctor(&qp->galpas, outs[6], outs[6]); 339 hcp_galpas_ctor(&parms->galpas, outs[6], outs[6]);
349 340
350 if (ret == H_NOT_ENOUGH_RESOURCES) 341 if (ret == H_NOT_ENOUGH_RESOURCES)
351 ehca_gen_err("Not enough resources. ret=%lx", ret); 342 ehca_gen_err("Not enough resources. ret=%lx", ret);
diff --git a/drivers/infiniband/hw/ehca/hcp_if.h b/drivers/infiniband/hw/ehca/hcp_if.h
index 2869f7dd6196..60ce02b70663 100644
--- a/drivers/infiniband/hw/ehca/hcp_if.h
+++ b/drivers/infiniband/hw/ehca/hcp_if.h
@@ -78,7 +78,6 @@ u64 hipz_h_alloc_resource_cq(const struct ipz_adapter_handle adapter_handle,
78 * initialize resources, create empty QPPTs (2 rings). 78 * initialize resources, create empty QPPTs (2 rings).
79 */ 79 */
80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle, 80u64 hipz_h_alloc_resource_qp(const struct ipz_adapter_handle adapter_handle,
81 struct ehca_qp *qp,
82 struct ehca_alloc_qp_parms *parms); 81 struct ehca_alloc_qp_parms *parms);
83 82
84u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle, 83u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,