author		Roland Dreier <rolandd@cisco.com>	2010-05-05 17:45:40 -0400
committer	Roland Dreier <rolandd@cisco.com>	2010-05-05 17:45:40 -0400
commit		be4c9bad9d0edb6bc3bd8fffc2f98e0e2112da39 (patch)
tree		1fbe204cb8f386e35581bd9fa8ea835950b076c4 /drivers/infiniband/hw/cxgb4/qp.c
parent		cfdda9d764362ab77b11a410bb928400e6520d57 (diff)
MAINTAINERS: Add cxgb4 and iw_cxgb4 entries
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/qp.c')
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	28
1 file changed, 14 insertions, 14 deletions
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bd56c841ef75..83a01dc0c4c1 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -856,7 +856,8 @@ int c4iw_post_zb_read(struct c4iw_qp *qhp)
 	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
-int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
+static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
+			   gfp_t gfp)
 {
 	struct fw_ri_wr *wqe;
 	struct sk_buff *skb;
@@ -865,9 +866,9 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
 	PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
 	     qhp->ep->hwtid);
 
-	skb = alloc_skb(sizeof *wqe, GFP_KERNEL | __GFP_NOFAIL);
+	skb = alloc_skb(sizeof *wqe, gfp);
 	if (!skb)
-		return -ENOMEM;
+		return;
 	set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
 	wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -881,7 +882,7 @@ int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe)
 	wqe->u.terminate.immdlen = cpu_to_be32(sizeof *term);
 	term = (struct terminate_message *)wqe->u.terminate.termmsg;
 	build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
-	return c4iw_ofld_send(&qhp->rhp->rdev, skb);
+	c4iw_ofld_send(&qhp->rhp->rdev, skb);
 }
 
 /*
@@ -1130,14 +1131,14 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
 		newattr.enable_bind = attrs->enable_bind;
 	if (mask & C4IW_QP_ATTR_MAX_ORD) {
-		if (attrs->max_ord > T4_MAX_READ_DEPTH) {
+		if (attrs->max_ord > c4iw_max_read_depth) {
 			ret = -EINVAL;
 			goto out;
 		}
 		newattr.max_ord = attrs->max_ord;
 	}
 	if (mask & C4IW_QP_ATTR_MAX_IRD) {
-		if (attrs->max_ird > T4_MAX_READ_DEPTH) {
+		if (attrs->max_ird > c4iw_max_read_depth) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1215,12 +1216,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 			qhp->attr.state = C4IW_QP_STATE_TERMINATE;
 			if (qhp->ibqp.uobject)
 				t4_set_wq_in_error(&qhp->wq);
-			if (!internal) {
-				ep = qhp->ep;
-				c4iw_get_ep(&ep->com);
-				terminate = 1;
-				disconnect = 1;
-			}
+			ep = qhp->ep;
+			c4iw_get_ep(&ep->com);
+			terminate = 1;
+			disconnect = 1;
 			break;
 		case C4IW_QP_STATE_ERROR:
 			qhp->attr.state = C4IW_QP_STATE_ERROR;
@@ -1301,7 +1300,7 @@ out:
 	spin_unlock_irqrestore(&qhp->lock, flag);
 
 	if (terminate)
-		c4iw_post_terminate(qhp, NULL);
+		post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
 
 	/*
 	 * If disconnect is 1, then we need to initiate a disconnect
@@ -1309,7 +1308,8 @@ out:
 	 * an abnormal close (RTS/CLOSING->ERROR).
 	 */
 	if (disconnect) {
-		c4iw_ep_disconnect(ep, abort, GFP_KERNEL);
+		c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
+				   GFP_KERNEL);
 		c4iw_put_ep(&ep->com);
 	}
 
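The substance of the change: the terminate/disconnect path can now also run for internal state transitions, and the patch passes GFP_ATOMIC for those, presumably because internal transitions can originate in a context that must not sleep. So the hard-coded GFP_KERNEL | __GFP_NOFAIL allocation is replaced by a caller-supplied gfp_t, and post_terminate() becomes a best-effort static void. Below is a minimal sketch of that pattern; the helper and caller names are hypothetical, but alloc_skb(), kfree_skb(), GFP_ATOMIC, and GFP_KERNEL are the real kernel APIs the patch uses.

/*
 * Illustrative sketch only: the helper no longer decides how it may
 * allocate; the caller, which knows whether it can sleep, passes the
 * gfp flags down.
 */
#include <linux/gfp.h>
#include <linux/skbuff.h>
#include <linux/types.h>

static void send_ctrl_msg(gfp_t gfp)
{
	struct sk_buff *skb;

	/* With GFP_ATOMIC this allocation never sleeps, so the helper
	 * is safe to call under a spinlock or from IRQ context. */
	skb = alloc_skb(128, gfp);
	if (!skb)
		return;		/* best effort: drop rather than block */

	/* ... build the message in skb and post it to the device ... */
	kfree_skb(skb);		/* stand-in for the real send */
}

static void example_caller(bool in_atomic_path)
{
	/* Pick flags by context, as c4iw_modify_qp() now does with
	 * "internal ? GFP_ATOMIC : GFP_KERNEL". */
	send_ctrl_msg(in_atomic_path ? GFP_ATOMIC : GFP_KERNEL);
}

Dropping __GFP_NOFAIL is the point of the allocation change: that flag makes the allocator retry, potentially sleeping, until it succeeds, which is unsafe once the function can be reached from atomic context. And since the one caller in this diff already ignored the return value of c4iw_post_terminate(), converting it to a void function loses nothing.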