author     Hoang-Nam Nguyen <hnguyen@de.ibm.com>    2008-01-17 09:05:45 -0500
committer  Roland Dreier <rolandd@cisco.com>        2008-01-25 17:15:44 -0500
commit     bbdd267ef2796e96b461b8447b2026ce06e6ec4b (patch)
tree       45cea3f33acc271da83fcccca0c5626c0281da4e /drivers/infiniband/hw
parent     b8b50e353b85bc3c791dd2b99370ac300ebcd186 (diff)
IB/ehca: Add "port connection autodetect mode"
This patch enhances ehca with a capability to "autodetect" the ports being connected physically. In order to utilize that function the module option nr_ports must be set to -1 (default is 2 - two ports). This feature is experimental and will be made the default later.

More detail: If the user connects only one port to the switch, the current code requires 1) port one to be connected and 2) module option nr_ports=1 to be given. If autodetect is enabled, ehca will not wait at creation of the GSI QP for the respective port to become active. Since firmware does not accept modify_qp() while the port is down at initialization, we need to cache all calls to modify_qp() for the SMI/GSI QP and just return a good return code. When a port is activated and we get a PORT_ACTIVE event, we replay the cached modify_qp() parms and re-trigger any posted recv WRs. Only then do we forward the PORT_ACTIVE event to registered clients.

The result of this autodetect patch is that all ports will be accessible by the users. Depending on their respective cabling, only those ports that are connected properly will become operable. If a user tries to modify a regular QP of a non-connected port, modify_qp() will fail. Furthermore, ibv_devinfo should show the port state accordingly.

Note that this patch primarily improves the loading behaviour of ehca. If the cable is removed while the driver is operating and plugged in again, firmware will handle that properly by sending an appropriate async event.

Signed-off-by: Hoang-Nam Nguyen <hnguyen@de.ibm.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
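The cache-and-replay scheme described above can be sketched in a few lines of standalone C. This is only an illustration under simplified assumptions: the names qp_attr, qp_attr_cache, pretend_modify_qp and replay_cached_attrs are invented stand-ins, not ehca symbols; the real logic lives in ehca_modify_qp() and ehca_recover_sqp() in the diff below, and CACHE_MAX merely mirrors EHCA_MOD_QP_PARM_MAX.

/*
 * Minimal userspace sketch of cache-and-replay for modify_qp().
 * All names here are illustrative stand-ins, not ehca symbols.
 */
#include <stdio.h>

#define CACHE_MAX 4            /* mirrors EHCA_MOD_QP_PARM_MAX */

struct qp_attr {               /* stand-in for struct ib_qp_attr */
        int qp_state;
        int port_num;
};

struct qp_attr_cache {         /* stand-in for struct ehca_mod_qp_parm */
        int mask;
        struct qp_attr attr;
};

static struct qp_attr_cache cache[CACHE_MAX];
static int cache_idx;
static int port_active;        /* set once PORT_ACTIVE arrives */

/* Pretend firmware call: only succeeds while the port is active. */
static int pretend_modify_qp(const struct qp_attr *attr, int mask)
{
        if (!port_active)
                return -1;
        printf("modify_qp: state=%d mask=%#x\n", attr->qp_state, mask);
        return 0;
}

/* While the port is down, cache the request and report success. */
static int modify_qp(const struct qp_attr *attr, int mask)
{
        if (!port_active) {
                if (cache_idx >= CACHE_MAX)
                        return -1;      /* cache overflow */
                cache[cache_idx].mask = mask;
                cache[cache_idx].attr = *attr;
                cache_idx++;
                return 0;               /* "good return code" */
        }
        return pretend_modify_qp(attr, mask);
}

/* On PORT_ACTIVE: replay the cached calls in order, then stop caching. */
static void replay_cached_attrs(void)
{
        int i;

        port_active = 1;
        for (i = 0; i < cache_idx; i++)
                if (pretend_modify_qp(&cache[i].attr, cache[i].mask))
                        break;
        cache_idx = 0;
}

int main(void)
{
        struct qp_attr a = { .qp_state = 2, .port_num = 1 };

        modify_qp(&a, 0x1);     /* cached: port still down */
        a.qp_state = 3;
        modify_qp(&a, 0x3);     /* cached as well */
        replay_cached_attrs();  /* PORT_ACTIVE: both calls hit "firmware" */
        return 0;
}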
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_classes.h  |  16
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_irq.c      |  26
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_iverbs.h   |   2
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_main.c     |   7
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_qp.c       | 159
-rw-r--r--  drivers/infiniband/hw/ehca/ehca_sqp.c      |   6
6 files changed, 201 insertions(+), 15 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index 936580d86ad3..2502366e845f 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -95,6 +95,10 @@ struct ehca_sma_attr {
 struct ehca_sport {
         struct ib_cq *ibcq_aqp1;
         struct ib_qp *ibqp_sqp[2];
+        /* lock to serialze modify_qp() calls for sqp in normal
+         * and irq path (when event PORT_ACTIVE is received first time)
+         */
+        spinlock_t mod_sqp_lock;
         enum ib_port_state port_state;
         struct ehca_sma_attr saved_attr;
 };
@@ -141,6 +145,14 @@ enum ehca_ext_qp_type {
         EQPT_SRQ = 3,
 };

+/* struct to cache modify_qp()'s parms for GSI/SMI qp */
+struct ehca_mod_qp_parm {
+        int mask;
+        struct ib_qp_attr attr;
+};
+
+#define EHCA_MOD_QP_PARM_MAX 4
+
 struct ehca_qp {
         union {
                 struct ib_qp ib_qp;
@@ -164,6 +176,9 @@ struct ehca_qp {
         struct ehca_cq *recv_cq;
         unsigned int sqerr_purgeflag;
         struct hlist_node list_entries;
+        /* array to cache modify_qp()'s parms for GSI/SMI qp */
+        struct ehca_mod_qp_parm *mod_qp_parm;
+        int mod_qp_parm_idx;
         /* mmap counter for resources mapped into user space */
         u32 mm_count_squeue;
         u32 mm_count_rqueue;
@@ -323,6 +338,7 @@ extern int ehca_port_act_time;
 extern int ehca_use_hp_mr;
 extern int ehca_scaling_code;
 extern int ehca_lock_hcalls;
+extern int ehca_nr_ports;

 struct ipzu_queue_resp {
         u32 qe_size;      /* queue entry size */
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 4c734ecef11d..863b34fa9ff9 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -356,17 +356,33 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
         u8 ec = EHCA_BMASK_GET(NEQE_EVENT_CODE, eqe);
         u8 port = EHCA_BMASK_GET(NEQE_PORT_NUMBER, eqe);
         u8 spec_event;
+        struct ehca_sport *sport = &shca->sport[port - 1];
+        unsigned long flags;

         switch (ec) {
         case 0x30: /* port availability change */
                 if (EHCA_BMASK_GET(NEQE_PORT_AVAILABILITY, eqe)) {
-                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                        int suppress_event;
+                        /* replay modify_qp for sqps */
+                        spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                        suppress_event = !sport->ibqp_sqp[IB_QPT_GSI];
+                        if (sport->ibqp_sqp[IB_QPT_SMI])
+                                ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_SMI]);
+                        if (!suppress_event)
+                                ehca_recover_sqp(sport->ibqp_sqp[IB_QPT_GSI]);
+                        spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+
+                        /* AQP1 was destroyed, ignore this event */
+                        if (suppress_event)
+                                break;
+
+                        sport->port_state = IB_PORT_ACTIVE;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                             "is active");
                         ehca_query_sma_attr(shca, port,
-                                            &shca->sport[port - 1].saved_attr);
+                                            &sport->saved_attr);
                 } else {
-                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                        sport->port_state = IB_PORT_DOWN;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
                                             "is inactive");
                 }
@@ -380,11 +396,11 @@ static void parse_ec(struct ehca_shca *shca, u64 eqe)
                         ehca_warn(&shca->ib_device, "disruptive port "
                                   "%d configuration change", port);

-                        shca->sport[port - 1].port_state = IB_PORT_DOWN;
+                        sport->port_state = IB_PORT_DOWN;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ERR,
                                             "is inactive");

-                        shca->sport[port - 1].port_state = IB_PORT_ACTIVE;
+                        sport->port_state = IB_PORT_ACTIVE;
                         dispatch_port_event(shca, port, IB_EVENT_PORT_ACTIVE,
                                             "is active");
                 } else
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index 5485799cdc8d..c469bfde2708 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -200,4 +200,6 @@ void ehca_free_fw_ctrlblock(void *ptr);
 #define ehca_free_fw_ctrlblock(ptr) free_page((unsigned long)(ptr))
 #endif

+void ehca_recover_sqp(struct ib_qp *sqp);
+
 #endif
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index 0a34083dac8a..84c9b7b8669b 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -90,7 +90,8 @@ MODULE_PARM_DESC(hw_level,
90 "hardware level" 90 "hardware level"
91 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)"); 91 " (0: autosensing (default), 1: v. 0.20, 2: v. 0.21)");
92MODULE_PARM_DESC(nr_ports, 92MODULE_PARM_DESC(nr_ports,
93 "number of connected ports (default: 2)"); 93 "number of connected ports (-1: autodetect, 1: port one only, "
94 "2: two ports (default)");
94MODULE_PARM_DESC(use_hp_mr, 95MODULE_PARM_DESC(use_hp_mr,
95 "high performance MRs (0: no (default), 1: yes)"); 96 "high performance MRs (0: no (default), 1: yes)");
96MODULE_PARM_DESC(port_act_time, 97MODULE_PARM_DESC(port_act_time,
@@ -693,7 +694,7 @@ static int __devinit ehca_probe(struct of_device *dev,
         struct ehca_shca *shca;
         const u64 *handle;
         struct ib_pd *ibpd;
-        int ret;
+        int ret, i;

         handle = of_get_property(dev->node, "ibm,hca-handle", NULL);
         if (!handle) {
@@ -714,6 +715,8 @@ static int __devinit ehca_probe(struct of_device *dev,
                 return -ENOMEM;
         }
         mutex_init(&shca->modify_mutex);
+        for (i = 0; i < ARRAY_SIZE(shca->sport); i++)
+                spin_lock_init(&shca->sport[i].mod_sqp_lock);

         shca->ofdev = dev;
         shca->ipz_hca_handle.handle = *handle;
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 26c6a945459f..8d3c35fa051b 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -729,12 +729,31 @@ static struct ehca_qp *internal_create_qp(
                 init_attr->cap.max_send_wr = parms.squeue.act_nr_wqes;
         my_qp->init_attr = *init_attr;

+        if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+                shca->sport[init_attr->port_num - 1].ibqp_sqp[qp_type] =
+                        &my_qp->ib_qp;
+                if (ehca_nr_ports < 0) {
+                        /* alloc array to cache subsequent modify qp parms
+                         * for autodetect mode
+                         */
+                        my_qp->mod_qp_parm =
+                                kzalloc(EHCA_MOD_QP_PARM_MAX *
+                                        sizeof(*my_qp->mod_qp_parm),
+                                        GFP_KERNEL);
+                        if (!my_qp->mod_qp_parm) {
+                                ehca_err(pd->device,
+                                         "Could not alloc mod_qp_parm");
+                                goto create_qp_exit4;
+                        }
+                }
+        }
+
         /* NOTE: define_apq0() not supported yet */
         if (qp_type == IB_QPT_GSI) {
                 h_ret = ehca_define_sqp(shca, my_qp, init_attr);
                 if (h_ret != H_SUCCESS) {
                         ret = ehca2ib_return_code(h_ret);
-                        goto create_qp_exit4;
+                        goto create_qp_exit5;
                 }
         }

@@ -743,7 +762,7 @@ static struct ehca_qp *internal_create_qp(
                 if (ret) {
                         ehca_err(pd->device,
                                  "Couldn't assign qp to send_cq ret=%i", ret);
-                        goto create_qp_exit4;
+                        goto create_qp_exit5;
                 }
         }

@@ -769,15 +788,18 @@ static struct ehca_qp *internal_create_qp(
                 if (ib_copy_to_udata(udata, &resp, sizeof resp)) {
                         ehca_err(pd->device, "Copy to udata failed");
                         ret = -EINVAL;
-                        goto create_qp_exit5;
+                        goto create_qp_exit6;
                 }
         }

         return my_qp;

-create_qp_exit5:
+create_qp_exit6:
         ehca_cq_unassign_qp(my_qp->send_cq, my_qp->real_qp_num);

+create_qp_exit5:
+        kfree(my_qp->mod_qp_parm);
+
 create_qp_exit4:
         if (HAS_RQ(my_qp))
                 ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);
@@ -995,7 +1017,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,
         unsigned long flags = 0;

         /* do query_qp to obtain current attr values */
-        mqpcb = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
+        mqpcb = ehca_alloc_fw_ctrlblock(GFP_ATOMIC);
         if (!mqpcb) {
                 ehca_err(ibqp->device, "Could not get zeroed page for mqpcb "
                          "ehca_qp=%p qp_num=%x ", my_qp, ibqp->qp_num);
@@ -1183,6 +1205,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_P_KEY_IDX, 1);
         }
         if (attr_mask & IB_QP_PORT) {
+                struct ehca_sport *sport;
+                struct ehca_qp *aqp1;
                 if (attr->port_num < 1 || attr->port_num > shca->num_ports) {
                         ret = -EINVAL;
                         ehca_err(ibqp->device, "Invalid port=%x. "
@@ -1191,6 +1215,29 @@ static int internal_modify_qp(struct ib_qp *ibqp,
                                  shca->num_ports);
                         goto modify_qp_exit2;
                 }
+                sport = &shca->sport[attr->port_num - 1];
+                if (!sport->ibqp_sqp[IB_QPT_GSI]) {
+                        /* should not occur */
+                        ret = -EFAULT;
+                        ehca_err(ibqp->device, "AQP1 was not created for "
+                                 "port=%x", attr->port_num);
+                        goto modify_qp_exit2;
+                }
+                aqp1 = container_of(sport->ibqp_sqp[IB_QPT_GSI],
+                                    struct ehca_qp, ib_qp);
+                if (ibqp->qp_type != IB_QPT_GSI &&
+                    ibqp->qp_type != IB_QPT_SMI &&
+                    aqp1->mod_qp_parm) {
+                        /*
+                         * firmware will reject this modify_qp() because
+                         * port is not activated/initialized fully
+                         */
+                        ret = -EFAULT;
+                        ehca_warn(ibqp->device, "Couldn't modify qp port=%x: "
+                                  "either port is being activated (try again) "
+                                  "or cabling issue", attr->port_num);
+                        goto modify_qp_exit2;
+                }
                 mqpcb->prim_phys_port = attr->port_num;
                 update_mask |= EHCA_BMASK_SET(MQPCB_MASK_PRIM_PHYS_PORT, 1);
         }
@@ -1470,6 +1517,8 @@ modify_qp_exit1:
 int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                    struct ib_udata *udata)
 {
+        struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca,
+                                              ib_device);
         struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp);
         struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
                                              ib_pd);
@@ -1482,9 +1531,100 @@ int ehca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
                 return -EINVAL;
         }

+        /* The if-block below caches qp_attr to be modified for GSI and SMI
+         * qps during the initialization by ib_mad. When the respective port
+         * is activated, ie we got an event PORT_ACTIVE, we'll replay the
+         * cached modify calls sequence, see ehca_recover_sqs() below.
+         * Why that is required:
+         * 1) If one port is connected, older code requires that port one
+         *    to be connected and module option nr_ports=1 to be given by
+         *    user, which is very inconvenient for end user.
+         * 2) Firmware accepts modify_qp() only if respective port has become
+         *    active. Older code had a wait loop of 30sec create_qp()/
+         *    define_aqp1(), which is not appropriate in practice. This
+         *    code now removes that wait loop, see define_aqp1(), and always
+         *    reports all ports to ib_mad resp. users. Only activated ports
+         *    will then usable for the users.
+         */
+        if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
+                int port = my_qp->init_attr.port_num;
+                struct ehca_sport *sport = &shca->sport[port - 1];
+                unsigned long flags;
+                spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                /* cache qp_attr only during init */
+                if (my_qp->mod_qp_parm) {
+                        struct ehca_mod_qp_parm *p;
+                        if (my_qp->mod_qp_parm_idx >= EHCA_MOD_QP_PARM_MAX) {
+                                ehca_err(&shca->ib_device,
+                                         "mod_qp_parm overflow state=%x port=%x"
+                                         " type=%x", attr->qp_state,
+                                         my_qp->init_attr.port_num,
+                                         ibqp->qp_type);
+                                spin_unlock_irqrestore(&sport->mod_sqp_lock,
+                                                       flags);
+                                return -EINVAL;
+                        }
+                        p = &my_qp->mod_qp_parm[my_qp->mod_qp_parm_idx];
+                        p->mask = attr_mask;
+                        p->attr = *attr;
+                        my_qp->mod_qp_parm_idx++;
+                        ehca_dbg(&shca->ib_device,
+                                 "Saved qp_attr for state=%x port=%x type=%x",
+                                 attr->qp_state, my_qp->init_attr.port_num,
+                                 ibqp->qp_type);
+                        spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+                        return 0;
+                }
+                spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+        }
+
         return internal_modify_qp(ibqp, attr, attr_mask, 0);
 }

+void ehca_recover_sqp(struct ib_qp *sqp)
+{
+        struct ehca_qp *my_sqp = container_of(sqp, struct ehca_qp, ib_qp);
+        int port = my_sqp->init_attr.port_num;
+        struct ib_qp_attr attr;
+        struct ehca_mod_qp_parm *qp_parm;
+        int i, qp_parm_idx, ret;
+        unsigned long flags, wr_cnt;
+
+        if (!my_sqp->mod_qp_parm)
+                return;
+        ehca_dbg(sqp->device, "SQP port=%x qp_num=%x", port, sqp->qp_num);
+
+        qp_parm = my_sqp->mod_qp_parm;
+        qp_parm_idx = my_sqp->mod_qp_parm_idx;
+        for (i = 0; i < qp_parm_idx; i++) {
+                attr = qp_parm[i].attr;
+                ret = internal_modify_qp(sqp, &attr, qp_parm[i].mask, 0);
+                if (ret) {
+                        ehca_err(sqp->device, "Could not modify SQP port=%x "
+                                 "qp_num=%x ret=%x", port, sqp->qp_num, ret);
+                        goto free_qp_parm;
+                }
+                ehca_dbg(sqp->device, "SQP port=%x qp_num=%x in state=%x",
+                         port, sqp->qp_num, attr.qp_state);
+        }
+
+        /* re-trigger posted recv wrs */
+        wr_cnt = my_sqp->ipz_rqueue.current_q_offset /
+                my_sqp->ipz_rqueue.qe_size;
+        if (wr_cnt) {
+                spin_lock_irqsave(&my_sqp->spinlock_r, flags);
+                hipz_update_rqa(my_sqp, wr_cnt);
+                spin_unlock_irqrestore(&my_sqp->spinlock_r, flags);
+                ehca_dbg(sqp->device, "doorbell port=%x qp_num=%x wr_cnt=%lx",
+                         port, sqp->qp_num, wr_cnt);
+        }
+
+free_qp_parm:
+        kfree(qp_parm);
+        /* this prevents subsequent calls to modify_qp() to cache qp_attr */
+        my_sqp->mod_qp_parm = NULL;
+}
+
 int ehca_query_qp(struct ib_qp *qp,
                   struct ib_qp_attr *qp_attr,
                   int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
@@ -1772,6 +1912,7 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         struct ehca_shca *shca = container_of(dev, struct ehca_shca, ib_device);
         struct ehca_pd *my_pd = container_of(my_qp->ib_qp.pd, struct ehca_pd,
                                              ib_pd);
+        struct ehca_sport *sport = &shca->sport[my_qp->init_attr.port_num - 1];
         u32 cur_pid = current->tgid;
         u32 qp_num = my_qp->real_qp_num;
         int ret;
@@ -1818,6 +1959,14 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,
         port_num = my_qp->init_attr.port_num;
         qp_type = my_qp->init_attr.qp_type;

+        if (qp_type == IB_QPT_SMI || qp_type == IB_QPT_GSI) {
+                spin_lock_irqsave(&sport->mod_sqp_lock, flags);
+                kfree(my_qp->mod_qp_parm);
+                my_qp->mod_qp_parm = NULL;
+                shca->sport[port_num - 1].ibqp_sqp[qp_type] = NULL;
+                spin_unlock_irqrestore(&sport->mod_sqp_lock, flags);
+        }
+
         /* no support for IB_QPT_SMI yet */
         if (qp_type == IB_QPT_GSI) {
                 struct ib_event event;
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index f0792e5fbd02..79e72b25b252 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -40,11 +40,8 @@
  */


-#include <linux/module.h>
-#include <linux/err.h>
 #include "ehca_classes.h"
 #include "ehca_tools.h"
-#include "ehca_qes.h"
 #include "ehca_iverbs.h"
 #include "hcp_if.h"

@@ -93,6 +90,9 @@ u64 ehca_define_sqp(struct ehca_shca *shca,
                 return H_PARAMETER;
         }

+        if (ehca_nr_ports < 0) /* autodetect mode */
+                return H_SUCCESS;
+
         for (counter = 0;
              shca->sport[port - 1].port_state != IB_PORT_ACTIVE &&
              counter < ehca_port_act_time;