Diffstat (limited to 'drivers/infiniband/hw/ehca')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_av.c        2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h  23
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_cq.c        2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c      38
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h    2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c     15
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c      180
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_reqs.c    112
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c       6
9 files changed, 320 insertions, 60 deletions
diff --git a/drivers/infiniband/hw/ehca/ehca_av.c b/drivers/infiniband/hw/ehca/ehca_av.c
index f7782c882ab4..194c1c30cf63 100644
--- a/drivers/infiniband/hw/ehca/ehca_av.c
+++ b/drivers/infiniband/hw/ehca/ehca_av.c
@@ -1,7 +1,7 @@
 /*
  * IBM eServer eHCA Infiniband device driver for Linux on POWER
  *
- * adress vector functions
+ * address vector functions
  *
  * Authors: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
  *          Khadija Souissi <souissik@de.ibm.com>
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 74d2b72a11d8..f281d16040f5 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -94,7 +94,11 @@ struct ehca_sma_attr {
 
 struct ehca_sport {
         struct ib_cq *ibcq_aqp1;
-        struct ib_qp *ibqp_aqp1;
+        struct ib_qp *ibqp_sqp[2];
+        /* lock to serialize modify_qp() calls for sqp in normal
+         * and irq path (when event PORT_ACTIVE is received first time)
+         */
+        spinlock_t mod_sqp_lock;
         enum ib_port_state port_state;
         struct ehca_sma_attr saved_attr;
 };
@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
         EQPT_SRQ = 3,
 };
 
+/* struct to cache modify_qp()'s parms for GSI/SMI qp */
+struct ehca_mod_qp_parm {
+        int mask;
+        struct ib_qp_attr attr;
+};
+
+#define EHCA_MOD_QP_PARM_MAX 4
+
 struct ehca_qp {
         union {
                 struct ib_qp ib_qp;
@@ -164,10 +176,18 @@ struct ehca_qp {
         struct ehca_cq *recv_cq;
         unsigned int sqerr_purgeflag;
         struct hlist_node list_entries;
+        /* array to cache modify_qp()'s parms for GSI/SMI qp */
+        struct ehca_mod_qp_parm *mod_qp_parm;
+        int mod_qp_parm_idx;
         /* mmap counter for resources mapped into user space */
         u32 mm_count_squeue;
         u32 mm_count_rqueue;
         u32 mm_count_galpa;
+        /* unsolicited ack circumvention */
+        int unsol_ack_circ;
+        int mtu_shift;
+        u32 message_count;
+        u32 packet_count;
 };
 
 #define IS_SRQ(qp) (qp->ext_type == EQPT_SRQ)
@@ -323,6 +343,7 @@ extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
 extern int ehca_lock_hcalls;
+extern int ehca_nr_ports;
 
 struct ipzu_queue_resp {
         u32 qe_size; /* queue entry size */
diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c
index 79c25f51c21e..0467c158d4a9 100644
--- a/drivers/infiniband/hw/ehca/ehca_cq.c
+++ b/drivers/infiniband/hw/ehca/ehca_cq.c
@@ -246,7 +246,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,
         } else {
                 if (h_ret != H_PAGE_REGISTERED) {
                         ehca_err(device, "Registration of page failed "
-                                 "ehca_cq=%p cq_num=%x h_ret=%li"
+                                 "ehca_cq=%p cq_num=%x h_ret=%li "
                                  "counter=%i act_pages=%i",
                                  my_cq, my_cq->cq_number,
                                  h_ret, counter, param.act_pages);
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 3f617b27b954..863b34fa9ff9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -62,6 +62,7 @@
 #define NEQE_PORT_NUMBER       EHCA_BMASK_IBM( 8, 15)
 #define NEQE_PORT_AVAILABILITY EHCA_BMASK_IBM(16, 16)
 #define NEQE_DISRUPTIVE        EHCA_BMASK_IBM(16, 16)
+#define NEQE_SPECIFIC_EVENT    EHCA_BMASK_IBM(16, 23)
 
 #define ERROR_DATA_LENGTH      EHCA_BMASK_IBM(52, 63)
 #define ERROR_DATA_TYPE        EHCA_BMASK_IBM( 0, 7)
@@ -354,17 +355,34 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
 {
         u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
         u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
+        u8 spec_event;
+        struct ehca_sport *sport = &shca->sport[port - 1];
+        unsigned long flags;
 
         switch (ec) {
         case 0x30: /* port availability change */
                 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                        int suppress_event;
+                        /* replay modify_qp for sqps */
+                        spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                        suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
+                        if (sport->ibqp_sqp[IB_QPT_SMI])
+                                ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+                        if (!suppress_event)
+                                ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+                        spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+                        /* AQP1 was destroyed, ignore this event */
+                        if (suppress_event)
+                                break;
+
+                        sport->port_state = IB_PORT_ACTIVE;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                             "is active");
                         ehca_query_sma_attr(shca, port,
-                                            &shca->sport[port - 1].saved_attr);
+                                            &sport->saved_attr);
                 } else {
-                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                        sport->port_state = IB_PORT_DOWN;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
                                             "is inactive");
                 }
@@ -378,11 +396,11 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
                         ehca_warn(&shca->ib_device, "disruptive port "
                                   "%d configuration change", port);
 
-                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                        sport->port_state = IB_PORT_DOWN;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
                                             "is inactive");
 
-                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                        sport->port_state = IB_PORT_ACTIVE;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                             "is active");
                 } else
@@ -394,6 +412,16 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
         case 0x33:  /* trace stopped */
                 ehca_err(&shca->ib_device, "Traced stopped.");
                 break;
+        case 0x34: /* util async event */
+                spec_event = EHCA_BMASK_GET(NEQE_SPECIFIC_EVENT, eqe);
+                if (spec_event == 0x80) /* client reregister required */
+                        dispatch_port_event(shca, port,
+                                            IB_EVENT_CLIENT_REREGISTER,
+                                            "client reregister req.");
+                else
+                        ehca_warn(&shca->ib_device, "Unknown util async "
+                                  "event %x on port %x", spec_event, port);
+                break;
         default:
                 ehca_err(&shca->ib_device, "Unknown event code: %x on %s.",
                          ec, shca->ib_device.name);
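
The event decode above relies on the driver's EHCA_BMASK_* helpers to pull IBM-numbered bit fields (bit 0 being the most significant bit) out of the 64-bit event queue entry: the port number from bits 8..15 and the new specific-event field from bits 16..23. The standalone sketch below models just that decoding step under those assumptions; neqe_get() and the sample EQE value are invented for illustration and are not the driver's actual macros.

#include <stdint.h>
#include <stdio.h>

/* extract IBM-numbered bits [from..to] (bit 0 = MSB) of a 64-bit EQE word */
static uint64_t neqe_get(uint64_t eqe, int from, int to)
{
        return (eqe >> (63 - to)) & ((1ULL << (to - from + 1)) - 1);
}

int main(void)
{
        /* fabricated EQE: port 1 in bits 8..15, specific event 0x80 in 16..23 */
        uint64_t eqe = ((uint64_t)0x01 << (63 - 15)) |
                       ((uint64_t)0x80 << (63 - 23));
        unsigned port = (unsigned)neqe_get(eqe, 8, 15);
        unsigned spec = (unsigned)neqe_get(eqe, 16, 23);

        if (spec == 0x80) /* client reregister required */
                printf("port %u: dispatch IB_EVENT_CLIENT_REREGISTER\n", port);
        else
                printf("port %u: unknown util async event %#x\n", port, spec);
        return 0;
}
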
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 5485799cdc8d..c469bfde2708 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -200,4 +200,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif
 
+void ehca_recover_sqp(struct ib_qp *sqp);
+
 #endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c9e32b46387f..84c9b7b8669b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
90 "hardware level" 90 "hardware level"
91 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); 91 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
92MODULE_PARM_DESC(nr_ports, 92MODULE_PARM_DESC(nr_ports,
93 "number of connected ports (default: 2)"); 93 "number of connected ports (-1: autodetect, 1: port one only, "
94 "2: two ports (default)");
94MODULE_PARM_DESC(use_hp_mr, 95MODULE_PARM_DESC(use_hp_mr,
95 "high performance MRs (0: no (default), 1: yes)"); 96 "high performance MRs (0: no (default), 1: yes)");
96MODULE_PARM_DESC(port_act_time, 97MODULE_PARM_DESC(port_act_time,
@@ -511,7 +512,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
         }
         sport->ibcq_aqp1 = ibcq;
 
-        if (sport->ibqp_aqp1) {
+        if (sport->ibqp_sqp[IB_QPT_GSI]) {
                 ehca_err(&shca->ib_device, "AQP1 QP is already created.");
                 ret = -EPERM;
                 goto create_aqp1;
@@ -537,7 +538,7 @@ static int ehca_create_aqp1(struct ehca_shca *shca, u32 port)
                 ret = PTR_ERR(ibqp);
                 goto create_aqp1;
         }
-        sport->ibqp_aqp1 = ibqp;
+        sport->ibqp_sqp[IB_QPT_GSI] = ibqp;
 
         return 0;
 
@@ -550,7 +551,7 @@ static int ehca_destroy_aqp1(struct ehca_sport *sport)
 {
         int ret;
 
-        ret = ib_destroy_qp(sport->ibqp_aqp1);
+        ret = ib_destroy_qp(sport->ibqp_sqp[IB_QPT_GSI]);
         if (ret) {
                 ehca_gen_err("Cannot destroy AQP1 QP. ret=%i", ret);
                 return ret;
@@ -693,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
         struct ehca_shca *shca;
         const u64 *handle;
         struct ib_pd *ibpd;
-        int ret;
+        int ret, i;
 
         handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
         if (!handle) {
@@ -714,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
                 return -ENOMEM;
         }
         mutex_init(&shca->modify_mutex);
+        for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
+                spin_lock_init(&shca->sport[i].mod_sqp_lock);
 
         shca->ofdev = dev;
         shca->ipz_hca_handle.handle = *handle;
@@ -934,7 +937,7 @@ void ehca_poll_eqs(unsigned long data)
                         ehca_process_eq(shca, 0);
                 }
         }
-        mod_timer(&poll_eqs_timer, jiffies + HZ);
+        mod_timer(&poll_eqs_timer, round_jiffies(jiffies + HZ));
         spin_unlock(&shca_list_lock);
 }
 
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index eff5fb55604b..1012f15a7140 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -592,10 +592,8 @@ static struct ehca_qp *internal_create_qp(
                 goto create_qp_exit1;
         }
 
-        if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-                parms.sigtype = HCALL_SIGT_EVERY;
-        else
-                parms.sigtype = HCALL_SIGT_BY_WQE;
+        /* Always signal by WQE so we can hide circ. WQEs */
+        parms.sigtype = HCALL_SIGT_BY_WQE;
 
         /* UD_AV CIRCUMVENTION */
         max_send_sge = init_attr->cap.max_send_sge;
@@ -618,6 +616,10 @@ static struct ehca_qp *internal_create_qp(
         parms.squeue.max_sge = max_send_sge;
         parms.rqueue.max_sge = max_recv_sge;
 
+        /* RC QPs need one more SWQE for unsolicited ack circumvention */
+        if (qp_type == IB_QPT_RC)
+                parms.squeue.max_wr++;
+
         if (EHCA_BMASK_GET(HCA_CAP_MINI_QP, shca->hca_cap)) {
                 if (HAS_SQ(my_qp))
                         ehca_determine_small_queue(
@@ -650,6 +652,8 @@ static struct ehca_qp *internal_create_qp(
                         parms.squeue.act_nr_sges = 1;
                         parms.rqueue.act_nr_sges = 1;
                 }
+                /* hide the extra WQE */
+                parms.squeue.act_nr_wqes--;
                 break;
         case IB_QPT_UD:
         case IB_QPT_GSI:
@@ -729,12 +733,31 @@ static struct ehca_qp *internal_create_qp(
         init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
         my_qp->init_attr = *init_attr;
 
+        if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+                shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+                        &my_qp->ib_qp;
+                if (ehca_nr_ports < 0) {
+                        /* alloc array to cache subsequent modify qp parms
+                         * for autodetect mode
+                         */
+                        my_qp->mod_qp_parm =
+                                kzalloc(EHCA_MOD_QP_PARM_MAX *
+                                        sizeof(*my_qp->mod_qp_parm),
+                                        GFP_KERNEL);
+                        if (!my_qp->mod_qp_parm) {
+                                ehca_err(pd->device,
+                                         "Could not alloc mod_qp_parm");
+                                goto create_qp_exit4;
+                        }
+                }
+        }
+
         /* NOTE: define_apq0() not supported yet */
         if (qp_type == IB_QPT_GSI) {
                 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
                 if (h_ret != H_SUCCESS) {
                         ret = ehca2ib_return_code(h_ret);
-                        goto create_qp_exit4;
+                        goto create_qp_exit5;
                 }
         }
 
@@ -743,7 +766,7 @@ static struct ehca_qp *internal_create_qp(
                 if (ret) {
                         ehca_err(pd->device,
                                  "Couldn't assign qp to send_cq ret=%i", ret);
-                        goto create_qp_exit4;
+                        goto create_qp_exit5;
                 }
         }
 
@@ -769,12 +792,18 @@ static struct ehca_qp *internal_create_qp(
                 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
                         ehca_err(pd->device, "Copy to udata failed");
                         ret = -EINVAL;
-                        goto create_qp_exit4;
+                        goto create_qp_exit6;
                 }
         }
 
         return my_qp;
 
+create_qp_exit6:
+        ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);
+
+create_qp_exit5:
+        kfree(my_qp->mod_qp_parm);
+
 create_qp_exit4:
         if (HAS_RQ(my_qp))
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
@@ -858,7 +887,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                         update_mask,
                         mqpcb, my_qp->galpas.kernel);
         if (hret != H_SUCCESS) {
-                ehca_err(pd->device, "Could not modify SRQ to INIT"
+                ehca_err(pd->device, "Could not modify SRQ to INIT "
                          "ehca_qp=%p qp_num=%x h_ret=%li",
                          my_qp, my_qp->real_qp_num, hret);
                 goto create_srq2;
@@ -872,7 +901,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                         update_mask,
                         mqpcb, my_qp->galpas.kernel);
         if (hret != H_SUCCESS) {
-                ehca_err(pd->device, "Could not enable SRQ"
+                ehca_err(pd->device, "Could not enable SRQ "
                          "ehca_qp=%p qp_num=%x h_ret=%li",
                          my_qp, my_qp->real_qp_num, hret);
                 goto create_srq2;
@@ -886,7 +915,7 @@ struct ib_srq *ehca_create_srq(struct ib_pd *pd,
                         update_mask,
                         mqpcb, my_qp->galpas.kernel);
         if (hret != H_SUCCESS) {
-                ehca_err(pd->device, "Could not modify SRQ to RTR"
+                ehca_err(pd->device, "Could not modify SRQ to RTR "
                          "ehca_qp=%p qp_num=%x h_ret=%li",
                          my_qp, my_qp->real_qp_num, hret);
                 goto create_srq2;
@@ -992,7 +1021,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         unsigned long flags = 0;
 
         /* do query_qp to obtain current attr values */
-        mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+        mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
         if (!mqpcb) {
                 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
                          "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1180,6 +1209,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
         }
         if (attr_mask & IB_QP_PORT) {
+                struct ehca_sport *sport;
+                struct ehca_qp *aqp1;
                 if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
                         ret = -EINVAL;
                         ehca_err(ibqp->device, "Invalid port=%x. "
@@ -1188,6 +1219,29 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                                  shca->num_ports);
                         goto modify_qp_exit2;
                 }
+                sport = &shca->sport[attr->port_num - 1];
+                if (!sport->ibqp_sqp[IB_QPT_GSI]) {
+                        /* should not occur */
+                        ret = -EFAULT;
+                        ehca_err(ibqp->device, "AQP1 was not created for "
+                                 "port=%x", attr->port_num);
+                        goto modify_qp_exit2;
+                }
+                aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
+                                    struct ehca_qp, ib_qp);
+                if (ibqp->qp_type != IB_QPT_GSI &&
+                    ibqp->qp_type != IB_QPT_SMI &&
+                    aqp1->mod_qp_parm) {
+                        /*
+                         * firmware will reject this modify_qp() because
+                         * port is not activated/initialized fully
+                         */
+                        ret = -EFAULT;
+                        ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
+                                  "either port is being activated (try again) "
+                                  "or cabling issue", attr->port_num);
+                        goto modify_qp_exit2;
+                }
                 mqpcb->prim_phys_port = attr->port_num;
                 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
         }
@@ -1244,6 +1298,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         }
 
         if (attr_mask & IB_QP_PATH_MTU) {
+                /* store ld(MTU) */
+                my_qp->mtu_shift = attr->path_mtu + 7;
                 mqpcb->path_mtu = attr->path_mtu;
                 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PATH_MTU, 1);
         }
@@ -1467,6 +1523,8 @@ modify_qp_exit1:
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
 {
+        struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+                                              ib_device);
         struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
         struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
                                              ib_pd);
@@ -1479,9 +1537,100 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                 return -EINVAL;
         }
 
+        /* The if-block below caches qp_attr to be modified for GSI and SMI
+         * qps during the initialization by ib_mad. When the respective port
+         * is activated, i.e. we get a PORT_ACTIVE event, we replay the
+         * cached modify calls sequence, see ehca_recover_sqp() below.
+         * Why that is required:
+         * 1) If one port is connected, older code required port one to be
+         *    connected and the module option nr_ports=1 to be given by the
+         *    user, which is very inconvenient for the end user.
+         * 2) Firmware accepts modify_qp() only if the respective port has
+         *    become active. Older code had a 30sec wait loop in create_qp()/
+         *    define_aqp1(), which is not appropriate in practice. This
+         *    code now removes that wait loop, see define_aqp1(), and always
+         *    reports all ports to ib_mad resp. its users. Only activated
+         *    ports will then be usable for the users.
+         */
+        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+                int port = my_qp->init_attr.port_num;
+                struct ehca_sport *sport = &shca->sport[port - 1];
+                unsigned long flags;
+                spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                /* cache qp_attr only during init */
+                if (my_qp->mod_qp_parm) {
+                        struct ehca_mod_qp_parm *p;
+                        if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
+                                ehca_err(&shca->ib_device,
+                                         "mod_qp_parm overflow state=%x port=%x"
+                                         " type=%x", attr->qp_state,
+                                         my_qp->init_attr.port_num,
+                                         ibqp->qp_type);
+                                spin_unlock_irqrestore(&sport->mod_sqp_lock,
+                                                       flags);
+                                return -EINVAL;
+                        }
+                        p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
+                        p->mask = attr_mask;
+                        p->attr = *attr;
+                        my_qp->mod_qp_parm_idx++;
+                        ehca_dbg(&shca->ib_device,
+                                 "Saved qp_attr for state=%x port=%x type=%x",
+                                 attr->qp_state, my_qp->init_attr.port_num,
+                                 ibqp->qp_type);
+                        spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+                        return 0;
+                }
+                spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+        }
+
         return internal_modify_qp(ibqp, attr, attr_mask, 0);
 }
 
+void ehca_recover_sqp(struct ib_qp *sqp)
+{
+        struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
+        int port = my_sqp->init_attr.port_num;
+        struct ib_qp_attr attr;
+        struct ehca_mod_qp_parm *qp_parm;
+        int i, qp_parm_idx, ret;
+        unsigned long flags, wr_cnt;
+
+        if (!my_sqp->mod_qp_parm)
+                return;
+        ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
+
+        qp_parm = my_sqp->mod_qp_parm;
+        qp_parm_idx = my_sqp->mod_qp_parm_idx;
+        for (i = 0; i < qp_parm_idx; i++) {
+                attr = qp_parm[i].attr;
+                ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
+                if (ret) {
+                        ehca_err(sqp->device, "Could not modify SQP port=%x "
+                                 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
+                        goto free_qp_parm;
+                }
+                ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
+                         port, sqp->qp_num, attr.qp_state);
+        }
+
+        /* re-trigger posted recv wrs */
+        wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
+                 my_sqp->ipz_rqueue.qe_size;
+        if (wr_cnt) {
+                spin_lock_irqsave(&my_sqp->spinlock_r, flags);
+                hipz_update_rqa(my_sqp, wr_cnt);
+                spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
+                ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
+                         port, sqp->qp_num, wr_cnt);
+        }
+
+free_qp_parm:
+        kfree(qp_parm);
+        /* this prevents subsequent calls to modify_qp() from caching qp_attr */
+        my_sqp->mod_qp_parm = NULL;
+}
+
 int ehca_query_qp(struct ib_qp *qp,
                   struct ib_qp_attr *qp_attr,
                   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
@@ -1769,6 +1918,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
         struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
                                              ib_pd);
+        struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
         u32 cur_pid = current->tgid;
         u32 qp_num = my_qp->real_qp_num;
         int ret;
@@ -1815,6 +1965,14 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         port_num = my_qp->init_attr.port_num;
         qp_type  = my_qp->init_attr.qp_type;
 
+        if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+                spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                kfree(my_qp->mod_qp_parm);
+                my_qp->mod_qp_parm = NULL;
+                shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
+                spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+        }
+
         /* no support for IB_QPT_SMI yet */
         if (qp_type == IB_QPT_GSI) {
                 struct ib_event event;
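
A note on the mechanism added above: while a port is still coming up, ehca_modify_qp() stores the requested (attr_mask, attr) pairs for the special QPs in the mod_qp_parm array instead of calling the firmware, and ehca_recover_sqp() replays them in their original order once PORT_ACTIVE arrives; destroy and replay both free the array so later calls go straight through. The minimal userspace sketch below models only that cache-and-replay idea; struct fake_sqp, modify_or_cache() and the state numbers are invented stand-ins for the driver's structures, not its real API.

#include <stdio.h>
#include <stdlib.h>

#define MOD_PARM_MAX 4                  /* mirrors EHCA_MOD_QP_PARM_MAX */

struct mod_entry {
        int mask;                       /* which attributes the caller set */
        int qp_state;                   /* the only attribute modeled here */
};

struct fake_sqp {
        struct mod_entry *cached;       /* NULL once replay has happened */
        int cached_cnt;
        int qp_state;
};

/* stands in for internal_modify_qp(): apply the change immediately */
static int apply_modify(struct fake_sqp *qp, int mask, int qp_state)
{
        (void)mask;
        qp->qp_state = qp_state;
        printf("applied modify_qp -> state %d\n", qp_state);
        return 0;
}

/* stands in for ehca_modify_qp(): cache while the port is not active yet */
static int modify_or_cache(struct fake_sqp *qp, int mask, int qp_state)
{
        if (qp->cached) {
                if (qp->cached_cnt >= MOD_PARM_MAX)
                        return -1;      /* overflow, like the driver's -EINVAL */
                qp->cached[qp->cached_cnt].mask = mask;
                qp->cached[qp->cached_cnt].qp_state = qp_state;
                qp->cached_cnt++;
                printf("cached modify_qp -> state %d\n", qp_state);
                return 0;
        }
        return apply_modify(qp, mask, qp_state);
}

/* stands in for ehca_recover_sqp(): replay on PORT_ACTIVE, then stop caching */
static void recover_sqp(struct fake_sqp *qp)
{
        for (int i = 0; i < qp->cached_cnt; i++)
                apply_modify(qp, qp->cached[i].mask, qp->cached[i].qp_state);
        free(qp->cached);
        qp->cached = NULL;              /* later calls go straight through */
}

int main(void)
{
        struct fake_sqp qp = {
                .cached = calloc(MOD_PARM_MAX, sizeof(struct mod_entry)),
        };

        modify_or_cache(&qp, 1, 2);     /* e.g. -> INIT, cached */
        modify_or_cache(&qp, 1, 3);     /* e.g. -> RTR, cached */
        recover_sqp(&qp);               /* port went ACTIVE: replay */
        modify_or_cache(&qp, 1, 4);     /* e.g. -> RTS, applied directly */
        return 0;
}
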
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index ea91360835d3..3aacc8cf1e44 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -50,6 +50,9 @@
50#include "hcp_if.h" 50#include "hcp_if.h"
51#include "hipz_fns.h" 51#include "hipz_fns.h"
52 52
53/* in RC traffic, insert an empty RDMA READ every this many packets */
54#define ACK_CIRC_THRESHOLD 2000000
55
53static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue, 56static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
54 struct ehca_wqe *wqe_p, 57 struct ehca_wqe *wqe_p,
55 struct ib_recv_wr *recv_wr) 58 struct ib_recv_wr *recv_wr)
@@ -81,7 +84,7 @@ static inline int ehca_write_rwqe(struct ipz_queue *ipz_rqueue,
         if (ehca_debug_level) {
                 ehca_gen_dbg("RECEIVE WQE written into ipz_rqueue=%p",
                              ipz_rqueue);
-                ehca_dmp( wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
+                ehca_dmp(wqe_p, 16*(6 + wqe_p->nr_of_data_seg), "recv wqe");
         }
 
         return 0;
@@ -135,7 +138,8 @@ static void trace_send_wr_ud(const struct ib_send_wr *send_wr)
 
 static inline int ehca_write_swqe(struct ehca_qp *qp,
                                   struct ehca_wqe *wqe_p,
-                                  const struct ib_send_wr *send_wr)
+                                  const struct ib_send_wr *send_wr,
+                                  int hidden)
 {
         u32 idx;
         u64 dma_length;
@@ -176,7 +180,9 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
         wqe_p->wr_flag = 0;
 
-        if (send_wr->send_flags & IB_SEND_SIGNALED)
+        if ((send_wr->send_flags & IB_SEND_SIGNALED ||
+            qp->init_attr.sq_sig_type == IB_SIGNAL_ALL_WR)
+            && !hidden)
                 wqe_p->wr_flag |= WQE_WRFLAG_REQ_SIGNAL_COM;
 
         if (send_wr->opcode == IB_WR_SEND_WITH_IMM ||
@@ -199,7 +205,7 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
 
                 wqe_p->destination_qp_number = send_wr->wr.ud.remote_qpn << 8;
                 wqe_p->local_ee_context_qkey = remote_qkey;
-                if (!send_wr->wr.ud.ah) {
+                if (unlikely(!send_wr->wr.ud.ah)) {
                         ehca_gen_err("wr.ud.ah is NULL. qp=%p", qp);
                         return -EINVAL;
                 }
@@ -255,6 +261,15 @@ static inline int ehca_write_swqe(struct ehca_qp *qp,
                 } /* eof idx */
                 wqe_p->u.nud.atomic_1st_op_dma_len = dma_length;
 
+                /* unsolicited ack circumvention */
+                if (send_wr->opcode == IB_WR_RDMA_READ) {
+                        /* on RDMA read, switch on and reset counters */
+                        qp->message_count = qp->packet_count = 0;
+                        qp->unsol_ack_circ = 1;
+                } else
+                        /* else estimate #packets */
+                        qp->packet_count += (dma_length >> qp->mtu_shift) + 1;
+
                 break;
 
         default:
@@ -355,13 +370,49 @@ static inline void map_ib_wc_status(u32 cqe_status,
                 *wc_status = IB_WC_SUCCESS;
 }
 
+static inline int post_one_send(struct ehca_qp *my_qp,
+                                struct ib_send_wr *cur_send_wr,
+                                struct ib_send_wr **bad_send_wr,
+                                int hidden)
+{
+        struct ehca_wqe *wqe_p;
+        int ret;
+        u64 start_offset = my_qp->ipz_squeue.current_q_offset;
+
+        /* get pointer next to free WQE */
+        wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
+        if (unlikely(!wqe_p)) {
+                /* too many posted work requests: queue overflow */
+                if (bad_send_wr)
+                        *bad_send_wr = cur_send_wr;
+                ehca_err(my_qp->ib_qp.device, "Too many posted WQEs "
+                         "qp_num=%x", my_qp->ib_qp.qp_num);
+                return -ENOMEM;
+        }
+        /* write a SEND WQE into the QUEUE */
+        ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr, hidden);
+        /*
+         * if something failed,
+         * reset the free entry pointer to the start value
+         */
+        if (unlikely(ret)) {
+                my_qp->ipz_squeue.current_q_offset = start_offset;
+                if (bad_send_wr)
+                        *bad_send_wr = cur_send_wr;
+                ehca_err(my_qp->ib_qp.device, "Could not write WQE "
+                         "qp_num=%x", my_qp->ib_qp.qp_num);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
 int ehca_post_send(struct ib_qp *qp,
                    struct ib_send_wr *send_wr,
                    struct ib_send_wr **bad_send_wr)
 {
         struct ehca_qp *my_qp = container_of(qp, struct ehca_qp, ib_qp);
         struct ib_send_wr *cur_send_wr;
-        struct ehca_wqe *wqe_p;
         int wqe_cnt = 0;
         int ret = 0;
         unsigned long flags;
@@ -369,37 +420,33 @@ int ehca_post_send(struct ib_qp *qp,
         /* LOCK the QUEUE */
         spin_lock_irqsave(&my_qp->spinlock_s, flags);
 
+        /* Send an empty extra RDMA read if:
+         *  1) there has been an RDMA read on this connection before
+         *  2) no RDMA read occurred for ACK_CIRC_THRESHOLD link packets
+         *  3) we can be sure that any previous extra RDMA read has been
+         *     processed so we don't overflow the SQ
+         */
+        if (unlikely(my_qp->unsol_ack_circ &&
+                     my_qp->packet_count > ACK_CIRC_THRESHOLD &&
+                     my_qp->message_count > my_qp->init_attr.cap.max_send_wr)) {
+                /* insert an empty RDMA READ to fix up the remote QP state */
+                struct ib_send_wr circ_wr;
+                memset(&circ_wr, 0, sizeof(circ_wr));
+                circ_wr.opcode = IB_WR_RDMA_READ;
+                post_one_send(my_qp, &circ_wr, NULL, 1); /* ignore retcode */
+                wqe_cnt++;
+                ehca_dbg(qp->device, "posted circ wr qp_num=%x", qp->qp_num);
+                my_qp->message_count = my_qp->packet_count = 0;
+        }
+
         /* loop processes list of send reqs */
         for (cur_send_wr = send_wr; cur_send_wr != NULL;
              cur_send_wr = cur_send_wr->next) {
-                u64 start_offset = my_qp->ipz_squeue.current_q_offset;
-                /* get pointer next to free WQE */
-                wqe_p = ipz_qeit_get_inc(&my_qp->ipz_squeue);
-                if (unlikely(!wqe_p)) {
-                        /* too many posted work requests: queue overflow */
-                        if (bad_send_wr)
-                                *bad_send_wr = cur_send_wr;
-                        if (wqe_cnt == 0) {
-                                ret = -ENOMEM;
-                                ehca_err(qp->device, "Too many posted WQEs "
-                                         "qp_num=%x", qp->qp_num);
-                        }
-                        goto post_send_exit0;
-                }
-                /* write a SEND WQE into the QUEUE */
-                ret = ehca_write_swqe(my_qp, wqe_p, cur_send_wr);
-                /*
-                 * if something failed,
-                 * reset the free entry pointer to the start value
-                 */
+                ret = post_one_send(my_qp, cur_send_wr, bad_send_wr, 0);
                 if (unlikely(ret)) {
-                        my_qp->ipz_squeue.current_q_offset = start_offset;
-                        *bad_send_wr = cur_send_wr;
-                        if (wqe_cnt == 0) {
-                                ret = -EINVAL;
-                                ehca_err(qp->device, "Could not write WQE "
-                                         "qp_num=%x", qp->qp_num);
-                        }
+                        /* if one or more WQEs were successful, don't fail */
+                        if (wqe_cnt)
+                                ret = 0;
                         goto post_send_exit0;
                 }
                 wqe_cnt++;
@@ -410,6 +457,7 @@ int ehca_post_send(struct ib_qp *qp,
 post_send_exit0:
         iosync(); /* serialize GAL register access */
         hipz_update_sqa(my_qp, wqe_cnt);
+        my_qp->message_count += wqe_cnt;
         spin_unlock_irqrestore(&my_qp->spinlock_s, flags);
         return ret;
 }
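
On the bookkeeping behind the circumvention above: every RC send adds an estimate of (length >> mtu_shift) + 1 to packet_count, an RDMA READ resets both counters and arms the mechanism, and post_send() slips in a hidden empty RDMA READ once packet_count exceeds ACK_CIRC_THRESHOLD and message_count exceeds the send queue depth. The sketch below is a simplified, self-contained model of that accounting only; struct qp_acct, account_send() and need_empty_rdma_read() are illustrative names, not the driver's.

#include <stdint.h>
#include <stdio.h>

#define ACK_THRESHOLD 2000000           /* mirrors ACK_CIRC_THRESHOLD */

struct qp_acct {
        int armed;                      /* an RDMA READ has been seen before */
        unsigned mtu_shift;             /* log2 of the path MTU in bytes */
        uint64_t message_count;
        uint64_t packet_count;
        unsigned max_send_wr;           /* send queue depth */
};

/* called for every posted send WQE */
static void account_send(struct qp_acct *qp, int is_rdma_read, uint64_t len)
{
        if (is_rdma_read) {
                /* a real READ flushes ACK state; restart the counters */
                qp->message_count = qp->packet_count = 0;
                qp->armed = 1;
        } else {
                qp->packet_count += (len >> qp->mtu_shift) + 1;
        }
        qp->message_count++;
}

/* decision made at the top of post_send() */
static int need_empty_rdma_read(const struct qp_acct *qp)
{
        return qp->armed &&
               qp->packet_count > ACK_THRESHOLD &&
               qp->message_count > qp->max_send_wr;
}

int main(void)
{
        struct qp_acct qp = { .mtu_shift = 11, .max_send_wr = 16 }; /* 2K MTU */

        account_send(&qp, 1, 0);                /* initial RDMA READ arms it */
        for (int i = 0; i < 1000; i++)
                account_send(&qp, 0, 1 << 22);  /* 4 MB RDMA writes */

        printf("packets=%llu messages=%llu -> %s\n",
               (unsigned long long)qp.packet_count,
               (unsigned long long)qp.message_count,
               need_empty_rdma_read(&qp) ? "post empty RDMA READ" : "keep going");
        return 0;
}
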
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index f0792e5fbd02..79e72b25b252 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -40,11 +40,8 @@
  */
 
 
-#include <linux/module.h>
-#include <linux/err.h>
 #include "ehca_classes.h"
 #include "ehca_tools.h"
-#include "ehca_qes.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"
 
@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
                 return H_PARAMETER;
         }
 
+        if (ehca_nr_ports < 0) /* autodetect mode */
+                return H_SUCCESS;
+
         for (counter = 0;
              shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
              counter < ehca_port_act_time;