path: root/drivers/infiniband/hw
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c           46
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c            4
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c           2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c            5
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c    25
-rw-r--r--  drivers/infiniband/hw/qib/qib_intr.c        6
6 files changed, 66 insertions, 22 deletions
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f660cd04ec2f..31fb44085c9b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1463,9 +1463,9 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_qp_attributes attrs;
 	int disconnect = 1;
 	int release = 0;
-	int abort = 0;
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int tid = GET_TID(hdr);
+	int ret;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
@@ -1501,10 +1501,12 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 		start_ep_timer(ep);
 		__state_set(&ep->com, CLOSING);
 		attrs.next_state = C4IW_QP_STATE_CLOSING;
-		abort = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-		peer_close_upcall(ep);
-		disconnect = 1;
+		if (ret != -ECONNRESET) {
+			peer_close_upcall(ep);
+			disconnect = 1;
+		}
 		break;
 	case ABORTING:
 		disconnect = 0;
@@ -2109,15 +2111,16 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 		break;
 	}
 
-	mutex_unlock(&ep->com.mutex);
 	if (close) {
-		if (abrupt)
-			ret = abort_connection(ep, NULL, gfp);
-		else
+		if (abrupt) {
+			close_complete_upcall(ep);
+			ret = send_abort(ep, NULL, gfp);
+		} else
 			ret = send_halfclose(ep, gfp);
 		if (ret)
 			fatal = 1;
 	}
+	mutex_unlock(&ep->com.mutex);
 	if (fatal)
 		release_ep_resources(ep);
 	return ret;
@@ -2301,6 +2304,31 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	struct cpl_abort_req_rss *req = cplhdr(skb);
+	struct c4iw_ep *ep;
+	struct tid_info *t = dev->rdev.lldi.tids;
+	unsigned int tid = GET_TID(req);
+
+	ep = lookup_tid(t, tid);
+	if (is_neg_adv_abort(req->status)) {
+		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
+		     ep->hwtid);
+		kfree_skb(skb);
+		return 0;
+	}
+	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
+	     ep->com.state);
+
+	/*
+	 * Wake up any threads in rdma_init() or rdma_fini().
+	 */
+	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	sched(dev, skb);
+	return 0;
+}
+
 /*
  * Most upcalls from the T4 Core go to sched() to
  * schedule the processing on a work queue.
@@ -2317,7 +2345,7 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 	[CPL_PASS_ESTABLISH] = sched,
 	[CPL_PEER_CLOSE] = sched,
 	[CPL_CLOSE_CON_RPL] = sched,
-	[CPL_ABORT_REQ_RSS] = sched,
+	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
 	[CPL_RDMA_TERMINATE] = sched,
 	[CPL_FW4_ACK] = sched,
 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 8d8f8add6fcd..1720dc790d13 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -801,6 +801,10 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
 		hwentries = memsize / sizeof *chp->cq.queue;
+		while (hwentries > T4_MAX_IQ_SIZE) {
+			memsize -= PAGE_SIZE;
+			hwentries = memsize / sizeof *chp->cq.queue;
+		}
 	}
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 273ffe49525a..0347eed4a167 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -625,7 +625,7 @@ pbl_done:
 	mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
 	mhp->attr.va_fbo = virt;
 	mhp->attr.page_size = shift - 12;
-	mhp->attr.len = (u32) length;
+	mhp->attr.len = length;
 
 	err = register_mem(rhp, php, mhp, shift);
 	if (err)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 3b773b05a898..a41578e48c7b 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1207,11 +1207,8 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			c4iw_get_ep(&qhp->ep->com);
 		}
 		ret = rdma_fini(rhp, qhp, ep);
-		if (ret) {
-			if (internal)
-				c4iw_get_ep(&qhp->ep->com);
+		if (ret)
 			goto err;
-		}
 		break;
 	case C4IW_QP_STATE_TERMINATE:
 		set_state(qhp, C4IW_QP_STATE_TERMINATE);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 9f53e68a096a..8ec5237031a0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -469,6 +469,8 @@ static u8 ib_rate_to_delay[IB_RATE_120_GBPS + 1] = {
 #define IB_7322_LT_STATE_RECOVERIDLE     0x0f
 #define IB_7322_LT_STATE_CFGENH          0x10
 #define IB_7322_LT_STATE_CFGTEST         0x11
+#define IB_7322_LT_STATE_CFGWAITRMTTEST  0x12
+#define IB_7322_LT_STATE_CFGWAITENH      0x13
 
 /* link state machine states from IBC */
 #define IB_7322_L_STATE_DOWN             0x0
@@ -498,8 +500,10 @@ static const u8 qib_7322_physportstate[0x20] = {
 		IB_PHYSPORTSTATE_LINK_ERR_RECOVER,
 	[IB_7322_LT_STATE_CFGENH] = IB_PHYSPORTSTATE_CFG_ENH,
 	[IB_7322_LT_STATE_CFGTEST] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x12] = IB_PHYSPORTSTATE_CFG_TRAIN,
-	[0x13] = IB_PHYSPORTSTATE_CFG_WAIT_ENH,
+	[IB_7322_LT_STATE_CFGWAITRMTTEST] =
+		IB_PHYSPORTSTATE_CFG_TRAIN,
+	[IB_7322_LT_STATE_CFGWAITENH] =
+		IB_PHYSPORTSTATE_CFG_WAIT_ENH,
 	[0x14] = IB_PHYSPORTSTATE_CFG_TRAIN,
 	[0x15] = IB_PHYSPORTSTATE_CFG_TRAIN,
 	[0x16] = IB_PHYSPORTSTATE_CFG_TRAIN,
@@ -1692,7 +1696,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
 		break;
 	}
 
-	if (ibclt == IB_7322_LT_STATE_CFGTEST &&
+	if (((ibclt >= IB_7322_LT_STATE_CFGTEST &&
+	      ibclt <= IB_7322_LT_STATE_CFGWAITENH) ||
+	      ibclt == IB_7322_LT_STATE_LINKUP) &&
 	    (ibcst & SYM_MASK(IBCStatusA_0, LinkSpeedQDR))) {
 		force_h1(ppd);
 		ppd->cpspec->qdr_reforce = 1;
@@ -7301,12 +7307,17 @@ static void ibsd_wr_allchans(struct qib_pportdata *ppd, int addr, unsigned data,
 static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 {
 	u64 data = qib_read_kreg_port(ppd, krp_serdesctrl);
-	printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS %s\n",
-		ppd->dd->unit, ppd->port, (enable ? "on" : "off"));
-	if (enable)
+	u8 state = SYM_FIELD(data, IBSerdesCtrl_0, RXLOSEN);
+
+	if (enable && !state) {
+		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS on\n",
+			ppd->dd->unit, ppd->port);
 		data |= SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
-	else
+	} else if (!enable && state) {
+		printk(KERN_INFO QIB_DRV_NAME " IB%u:%u Turning LOS off\n",
+			ppd->dd->unit, ppd->port);
 		data &= ~SYM_MASK(IBSerdesCtrl_0, RXLOSEN);
+	}
 	qib_write_kreg_port(ppd, krp_serdesctrl, data);
 }
 
diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c
index a693c56ec8a6..6ae57d23004a 100644
--- a/drivers/infiniband/hw/qib/qib_intr.c
+++ b/drivers/infiniband/hw/qib/qib_intr.c
@@ -96,8 +96,12 @@ void qib_handle_e_ibstatuschanged(struct qib_pportdata *ppd, u64 ibcs)
 	 * states, or if it transitions from any of the up (INIT or better)
 	 * states into any of the down states (except link recovery), then
 	 * call the chip-specific code to take appropriate actions.
+	 *
+	 * ppd->lflags could be 0 if this is the first time the interrupt
+	 * handlers has been called but the link is already up.
 	 */
-	if (lstate >= IB_PORT_INIT && (ppd->lflags & QIBL_LINKDOWN) &&
+	if (lstate >= IB_PORT_INIT &&
+	    (!ppd->lflags || (ppd->lflags & QIBL_LINKDOWN)) &&
 	    ltstate == IB_PHYSPORTSTATE_LINKUP) {
 		/* transitioned to UP */
 		if (dd->f_ib_updown(ppd, 1, ibcs))