author     Steve Wise <swise@opengridcomputing.com>   2010-07-23 15:12:27 -0400
committer  Roland Dreier <rolandd@cisco.com>          2010-08-03 00:06:06 -0400
commit     73d6fcad2ac84b6fad326d87dc1dd0b29aabbd34 (patch)
tree       c603099ce2df83cf11e311073e13e2b66d8f3f71 /drivers/infiniband/hw
parent     d37ac31ddc24c1a0beed134278bc074c98812210 (diff)
RDMA/cxgb4: Fix race in fini path
There exists a race where the application disconnects, initiating an
orderly close (via rdma_fini()), concurrently with an ingress abort
condition, which initiates an abortive close. Since rdma_fini() cannot
be called with IRQs disabled, the QP lock must be dropped across the
call, so the fini can run after the QP has already been transitioned
to ERROR. This is OK, but we need to protect against qp->ep getting
NULLed in that window.
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
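To illustrate the pattern the fix relies on, here is a minimal userspace sketch (plain pthreads; the names struct qp, struct ep, do_fini, close_path and abort_path are illustrative, not the driver's): the orderly-close path snapshots the endpoint pointer while the lock is still held and hands the snapshot to the unlocked fini call, so a concurrent abort that NULLs the shared field cannot be dereferenced through it. The real patch additionally takes a reference on the ep via c4iw_get_ep(), which this sketch does not model.

/*
 * Minimal sketch of the "snapshot under the lock" pattern.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

struct ep { int hwtid; };

struct qp {
	pthread_mutex_t lock;
	struct ep *ep;			/* may be cleared by the abort path */
};

/* Runs with the lock dropped, like rdma_fini(); must not touch qp->ep. */
static void do_fini(struct ep *ep)
{
	printf("orderly close for tid %d\n", ep->hwtid);
}

static void *close_path(void *arg)
{
	struct qp *qp = arg;
	struct ep *ep;

	pthread_mutex_lock(&qp->lock);
	ep = qp->ep;			/* snapshot while protected */
	pthread_mutex_unlock(&qp->lock);
	if (ep)				/* safe even if qp->ep is NULLed now */
		do_fini(ep);
	return NULL;
}

static void *abort_path(void *arg)
{
	struct qp *qp = arg;

	pthread_mutex_lock(&qp->lock);
	qp->ep = NULL;			/* abortive close detaches the ep */
	pthread_mutex_unlock(&qp->lock);
	return NULL;
}

int main(void)
{
	static struct ep e = { .hwtid = 42 };
	struct qp qp = { .ep = &e };
	pthread_t t1, t2;

	pthread_mutex_init(&qp.lock, NULL);
	pthread_create(&t1, NULL, close_path, &qp);
	pthread_create(&t2, NULL, abort_path, &qp);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	pthread_mutex_destroy(&qp.lock);
	return 0;
}

The patch below makes the same choice: c4iw_modify_qp() captures ep = qhp->ep before dropping qhp->lock and passes it into rdma_fini(), so rdma_fini() no longer dereferences qhp->ep at all.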
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 657a5b300b23..c9aaf24bf2b4 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -961,7 +961,8 @@ static void flush_qp(struct c4iw_qp *qhp, unsigned long *flag)
 	__flush_qp(qhp, rchp, schp, flag);
 }
 
-static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
+static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
+		     struct c4iw_ep *ep)
 {
 	struct fw_ri_wr *wqe;
 	int ret;
@@ -969,12 +970,12 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	struct sk_buff *skb;
 
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-	     qhp->ep->hwtid);
+	     ep->hwtid);
 
 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
 	if (!skb)
 		return -ENOMEM;
-	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
+	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
 	memset(wqe, 0, sizeof *wqe);
@@ -982,7 +983,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 		FW_WR_OP(FW_RI_INIT_WR) |
 		FW_WR_COMPL(1));
 	wqe->flowid_len16 = cpu_to_be32(
-		FW_WR_FLOWID(qhp->ep->hwtid) |
+		FW_WR_FLOWID(ep->hwtid) |
 		FW_WR_LEN16(DIV_ROUND_UP(sizeof *wqe, 16)));
 	wqe->cookie = (u64)&wr_wait;
 
@@ -1212,17 +1213,16 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 		case C4IW_QP_STATE_CLOSING:
 			BUG_ON(atomic_read(&qhp->ep->com.kref.refcount) < 2);
 			qhp->attr.state = C4IW_QP_STATE_CLOSING;
+			ep = qhp->ep;
 			if (!internal) {
 				abort = 0;
 				disconnect = 1;
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 			}
 			spin_unlock_irqrestore(&qhp->lock, flag);
-			ret = rdma_fini(rhp, qhp);
+			ret = rdma_fini(rhp, qhp, ep);
 			spin_lock_irqsave(&qhp->lock, flag);
 			if (ret) {
-				ep = qhp->ep;
 				c4iw_get_ep(&ep->com);
 				disconnect = abort = 1;
 				goto err;