path: root/drivers/infiniband
author    Vipul Pandya <vipul@chelsio.com>    2012-05-18 05:59:29 -0400
committer Roland Dreier <roland@purestorage.com>    2012-05-18 16:22:32 -0400
commit    4984037bef54253d4d010d3e57f175ab694bee26 (patch)
tree      1519c324e47960546d66606ab4fa5a63fa203994 /drivers/infiniband
parent    2c97478106880a5fb241a473252e61845a69386e (diff)
RDMA/cxgb4: Disable interrupts in c4iw_ev_dispatch()
Use GFP_ATOMIC in _insert_handle() if ints are disabled.

Don't panic if we get an abort with no endpoint found.  Just log a warning.

Signed-off-by: Vipul Pandya <vipul@chelsio.com>
Signed-off-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
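The pattern behind the fix, as a minimal sketch (the demo_* names and structure below are illustrative only, not the driver's code): once a path takes a lock with spin_lock_irq(), interrupts are disabled, so any allocation reached from that path must use GFP_ATOMIC; GFP_KERNEL may sleep, which is not allowed there. A helper callable from both contexts therefore has to pick its GFP flag from the caller, which is exactly what _insert_handle() does below with its lock argument.

/*
 * Illustrative only -- not cxgb4 code.
 */
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_dev {
	spinlock_t lock;
};

/* Caller says whether it already holds dev->lock with IRQs disabled. */
static void *demo_alloc(struct demo_dev *dev, size_t len, bool locked)
{
	return kzalloc(len, locked ? GFP_ATOMIC : GFP_KERNEL);
}

static void demo_dispatch(struct demo_dev *dev)
{
	void *obj;

	spin_lock_irq(&dev->lock);		/* interrupts disabled from here */
	obj = demo_alloc(dev, 64, true);	/* must not sleep: GFP_ATOMIC */
	spin_unlock_irq(&dev->lock);
	kfree(obj);
}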
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- drivers/infiniband/hw/cxgb4/cm.c       | 5 ++++-
-rw-r--r-- drivers/infiniband/hw/cxgb4/ev.c       | 8 ++++----
-rw-r--r-- drivers/infiniband/hw/cxgb4/iw_cxgb4.h | 2 +-
3 files changed, 9 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 4c7c62fe49d3..6ce401abdbd0 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1362,7 +1362,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-	BUG_ON(!ep);
+	if (!ep) {
+		printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n");
+		return 0;
+	}
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index 397cb36cf103..cf2f6b47617a 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -84,7 +84,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 	struct c4iw_qp *qhp;
 	u32 cqid;
 
-	spin_lock(&dev->lock);
+	spin_lock_irq(&dev->lock);
 	qhp = get_qhp(dev, CQE_QPID(err_cqe));
 	if (!qhp) {
 		printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d "
@@ -93,7 +93,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}
 
@@ -109,13 +109,13 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
 		       CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe),
 		       CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe),
 		       CQE_WRID_LOW(err_cqe));
-		spin_unlock(&dev->lock);
+		spin_unlock_irq(&dev->lock);
 		goto out;
 	}
 
 	c4iw_qp_add_ref(&qhp->ibqp);
 	atomic_inc(&chp->refcnt);
-	spin_unlock(&dev->lock);
+	spin_unlock_irq(&dev->lock);
 
 	/* Bad incoming write */
 	if (RQ_TYPE(err_cqe) &&
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index a11ed5ce536a..e8b88a02cc77 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -246,7 +246,7 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,
 	int newid;
 
 	do {
-		if (!idr_pre_get(idr, GFP_KERNEL))
+		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
 			return -ENOMEM;
 		if (lock)
 			spin_lock_irq(&rhp->lock);
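For context, a sketch of the retry loop that this hunk sits inside, written against the idr API of this era (idr_pre_get()/idr_get_new_above() were later removed from the kernel). This is a simplified illustration of the idiom, not the driver's exact function; the insert_handle_sketch name is hypothetical, while the rhp/idr/handle/id/lock parameters follow the hunk above.

/*
 * Sketch of the pre-3.9 idr insertion idiom.  idr_pre_get() preallocates
 * with the caller-chosen GFP flags outside the lock, and idr_get_new_above()
 * returns -EAGAIN if it still needs memory, so the loop retries.
 */
static int insert_handle_sketch(struct c4iw_dev *rhp, struct idr *idr,
				void *handle, u32 id, int lock)
{
	int ret;
	int newid;

	do {
		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC))
			return -ENOMEM;
		if (lock)
			spin_lock_irq(&rhp->lock);
		ret = idr_get_new_above(idr, handle, id, &newid);
		if (lock)
			spin_unlock_irq(&rhp->lock);
	} while (ret == -EAGAIN);

	return ret;
}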