Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/core/sa_query.c       |  2
-rw-r--r--  drivers/infiniband/core/ucma.c           | 22
-rw-r--r--  drivers/infiniband/hw/amso1100/c2_vq.c   |  6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c         |  2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c         |  4
-rw-r--r--  drivers/infiniband/hw/nes/nes_hw.c       | 32
-rw-r--r--  drivers/infiniband/hw/qib/qib_iba7322.c  | 30
-rw-r--r--  drivers/infiniband/hw/qib/qib_rc.c       |  5
8 files changed, 62 insertions(+), 41 deletions(-)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index e38be1bcc01c..fbbfa24cf572 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1079,7 +1079,7 @@ static void ib_sa_remove_one(struct ib_device *device)
 
 	ib_unregister_event_handler(&sa_dev->event_handler);
 
-	flush_scheduled_work();
+	flush_workqueue(ib_wq);
 
 	for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
 		if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ca12acf38379..ec1e9da1488b 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -636,6 +636,16 @@ static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
 	}
 }
 
+static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
+			       struct rdma_route *route)
+{
+	struct rdma_dev_addr *dev_addr;
+
+	dev_addr = &route->addr.dev_addr;
+	rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
+	rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
+}
+
 static ssize_t ucma_query_route(struct ucma_file *file,
 				const char __user *inbuf,
 				int in_len, int out_len)
@@ -670,8 +680,10 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
 	resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
 	resp.port_num = ctx->cm_id->port_num;
-	if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
-		switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
+	switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) {
+	case RDMA_TRANSPORT_IB:
+		switch (rdma_port_get_link_layer(ctx->cm_id->device,
+						 ctx->cm_id->port_num)) {
 		case IB_LINK_LAYER_INFINIBAND:
 			ucma_copy_ib_route(&resp, &ctx->cm_id->route);
 			break;
@@ -681,6 +693,12 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 		default:
 			break;
 		}
+		break;
+	case RDMA_TRANSPORT_IWARP:
+		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
+		break;
+	default:
+		break;
 	}
 
 out:
diff --git a/drivers/infiniband/hw/amso1100/c2_vq.c b/drivers/infiniband/hw/amso1100/c2_vq.c
index 9ce7819b7b2e..2ec716fb2edb 100644
--- a/drivers/infiniband/hw/amso1100/c2_vq.c
+++ b/drivers/infiniband/hw/amso1100/c2_vq.c
@@ -107,7 +107,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
 	r = kmalloc(sizeof(struct c2_vq_req), GFP_KERNEL);
 	if (r) {
 		init_waitqueue_head(&r->wait_object);
-		r->reply_msg = (u64) NULL;
+		r->reply_msg = 0;
 		r->event = 0;
 		r->cm_id = NULL;
 		r->qp = NULL;
@@ -123,7 +123,7 @@ struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)
  */
 void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
-	r->reply_msg = (u64) NULL;
+	r->reply_msg = 0;
 	if (atomic_dec_and_test(&r->refcnt)) {
 		kfree(r);
 	}
@@ -151,7 +151,7 @@ void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *r)
 void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *r)
 {
 	if (atomic_dec_and_test(&r->refcnt)) {
-		if (r->reply_msg != (u64) NULL)
+		if (r->reply_msg != 0)
 			vq_repbuf_free(c2dev,
 				       (void *) (unsigned long) r->reply_msg);
 		kfree(r);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0dc62b1438be..8b00e6c46f01 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -380,7 +380,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 					  16)) | FW_WR_FLOWID(ep->hwtid));
 
 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-	flowc->mnemval[0].val = cpu_to_be32(0);
+	flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 20800900ef3f..4f0be25cab1a 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -220,7 +220,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
@@ -243,7 +243,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		V_FW_RI_RES_WR_DCAEN(0) |
 		V_FW_RI_RES_WR_DCACPU(0) |
 		V_FW_RI_RES_WR_FBMIN(2) |
-		V_FW_RI_RES_WR_FBMAX(3) |
+		V_FW_RI_RES_WR_FBMAX(2) |
 		V_FW_RI_RES_WR_CIDXFTHRESHO(0) |
 		V_FW_RI_RES_WR_CIDXFTHRESH(0) |
 		V_FW_RI_RES_WR_EQSIZE(eqsize));
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd64022..08c194861af5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			netif_carrier_on(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 0) {
-				nesdev->iw_status = 1;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 			netif_carrier_off(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 1) {
-				nesdev->iw_status = 0;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
 			netif_carrier_on(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 0) {
-				nesdev->iw_status = 1;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
 			netif_carrier_off(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 1) {
-				nesdev->iw_status = 0;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 50cceb3ab885..b01809a82cb0 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -623,7 +623,6 @@ struct qib_chippport_specific {
 	u8 ibmalfusesnap;
 	struct qib_qsfp_data qsfp_data;
 	char epmsgbuf[192]; /* for port error interrupt msg buffer */
-	u8 bounced;
 };
 
 static struct {
@@ -1881,23 +1880,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 		    IB_PHYSPORTSTATE_DISABLED)
 			qib_set_ib_7322_lstate(ppd, 0,
 				QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
-		else {
-			u32 lstate;
-			/*
-			 * We need the current logical link state before
-			 * lflags are set in handle_e_ibstatuschanged.
-			 */
-			lstate = qib_7322_iblink_state(ibcs);
-
-			if (IS_QMH(dd) && !ppd->cpspec->bounced &&
-			    ltstate == IB_PHYSPORTSTATE_LINKUP &&
-			    (lstate >= IB_PORT_INIT &&
-				lstate <= IB_PORT_ACTIVE)) {
-				ppd->cpspec->bounced = 1;
-				qib_7322_set_ib_cfg(ppd, QIB_IB_CFG_LSTATE,
-					IB_LINKCMD_DOWN | IB_LINKINITCMD_POLL);
-			}
-
+		else
 			/*
 			 * Since going into a recovery state causes the link
 			 * state to go down and since recovery is transitory,
@@ -1911,7 +1894,6 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_WAITRMT &&
 			    ltstate != IB_PHYSPORTSTATE_RECOVERY_IDLE)
 				qib_handle_e_ibstatuschanged(ppd, ibcs);
-		}
 	}
 	if (*msg && iserr)
 		qib_dev_porterr(dd, ppd->port, "%s error\n", msg);
@@ -2381,6 +2363,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
 	qib_write_kreg_port(ppd, krp_rcvctrl, ppd->p_rcvctrl);
 	spin_unlock_irqrestore(&dd->cspec->rcvmod_lock, flags);
 
+	/* Hold the link state machine for mezz boards */
+	if (IS_QMH(dd) || IS_QME(dd))
+		qib_set_ib_7322_lstate(ppd, 0,
+				       QLOGIC_IB_IBCC_LINKINITCMD_DISABLE);
+
 	/* Also enable IBSTATUSCHG interrupt. */
 	val = qib_read_kreg_port(ppd, krp_errmask);
 	qib_write_kreg_port(ppd, krp_errmask,
@@ -5702,6 +5689,11 @@ static void set_no_qsfp_atten(struct qib_devdata *dd, int change)
 			ppd->cpspec->h1_val = h1;
 			/* now change the IBC and serdes, overriding generic */
 			init_txdds_table(ppd, 1);
+			/* Re-enable the physical state machine on mezz boards
+			 * now that the correct settings have been set. */
+			if (IS_QMH(dd) || IS_QME(dd))
+				qib_set_ib_7322_lstate(ppd, 0,
+					QLOGIC_IB_IBCC_LINKINITCMD_SLEEP);
 			any++;
 		}
 		if (*nxt == '\n')
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237b67ce..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	}
 
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)