author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-14 13:09:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-14 13:09:05 -0400
commit		e3b1fd56f175526db42ae94c457f29c2fa810aca (patch)
tree		3e2948ca44fb7fd5348244c2a83eef864b3110b4 /drivers/infiniband/hw/cxgb4
parent		0680eb1f485ba5aac2ee02c9f0622239c9a4b16c (diff)
parent		d087f6ad724dfbcdc3df8e0191b80d9d8d839e71 (diff)
Merge tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull infiniband/rdma updates from Roland Dreier:
"Main set of InfiniBand/RDMA updates for 3.17 merge window:
- MR reregistration support
- MAD support for RMPP in userspace
- iSER and SRP initiator updates
- ocrdma hardware driver updates
- other fixes..."
* tag 'rdma-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (52 commits)
IB/srp: Fix return value check in srp_init_module()
RDMA/ocrdma: report asic-id in query device
RDMA/ocrdma: Update sli data structure for endianness
RDMA/ocrdma: Obtain SL from device structure
RDMA/uapi: Include socket.h in rdma_user_cm.h
IB/srpt: Handle GID change events
IB/mlx5: Use ARRAY_SIZE instead of sizeof/sizeof[0]
IB/mlx4: Use ARRAY_SIZE instead of sizeof/sizeof[0]
RDMA/amso1100: Check for integer overflow in c2_alloc_cq_buf()
IPoIB: Remove unnecessary test for NULL before debugfs_remove()
IB/mad: Add user space RMPP support
IB/mad: add new ioctl to ABI to support new registration options
IB/mad: Add dev_notice messages for various umad/mad registration failures
IB/mad: Update module to [pr|dev]_* style print messages
IB/ipoib: Avoid multicast join attempts with invalid P_key
IB/umad: Update module to [pr|dev]_* style print messages
IB/ipoib: Avoid flushing the workqueue from worker context
IB/ipoib: Use P_Key change event instead of P_Key polling mechanism
IB/ipath: Add P_Key change event support
mlx4_core: Add support for secure-host and SMP firewall
...
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--	drivers/infiniband/hw/cxgb4/ev.c	 1
-rw-r--r--	drivers/infiniband/hw/cxgb4/qp.c	37
-rw-r--r--	drivers/infiniband/hw/cxgb4/t4.h	11
3 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c
index fbe6051af254..c9df0549f51d 100644
--- a/drivers/infiniband/hw/cxgb4/ev.c
+++ b/drivers/infiniband/hw/cxgb4/ev.c
@@ -227,6 +227,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 
 	chp = get_chp(dev, qid);
 	if (chp) {
+		t4_clear_cq_armed(&chp->cq);
 		spin_lock_irqsave(&chp->comp_handler_lock, flag);
 		(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
 		spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index c158fcc02bca..41cd6882b648 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1105,7 +1105,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 		       struct c4iw_cq *schp)
 {
 	int count;
-	int flushed;
+	int rq_flushed, sq_flushed;
 	unsigned long flag;
 
 	PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
@@ -1123,27 +1123,40 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 
 	c4iw_flush_hw_cq(rchp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
-	flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
+	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&rchp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
-		(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
-	}
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&schp->lock, flag);
 	spin_lock(&qhp->lock);
 	if (schp != rchp)
 		c4iw_flush_hw_cq(schp);
-	flushed = c4iw_flush_sq(qhp);
+	sq_flushed = c4iw_flush_sq(qhp);
 	spin_unlock(&qhp->lock);
 	spin_unlock_irqrestore(&schp->lock, flag);
-	if (flushed) {
-		spin_lock_irqsave(&schp->comp_handler_lock, flag);
-		(*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+
+	if (schp == rchp) {
+		if (t4_clear_cq_armed(&rchp->cq) &&
+		    (rq_flushed || sq_flushed)) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+	} else {
+		if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) {
+			spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+			(*rchp->ibcq.comp_handler)(&rchp->ibcq,
+						   rchp->ibcq.cq_context);
+			spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+		}
+		if (t4_clear_cq_armed(&schp->cq) && sq_flushed) {
+			spin_lock_irqsave(&schp->comp_handler_lock, flag);
+			(*schp->ibcq.comp_handler)(&schp->ibcq,
+						   schp->ibcq.cq_context);
+			spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+		}
 	}
 }
 
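The qp.c hunk above changes __flush_qp() so that a completion upcall is issued only when the CQ was actually armed and work was actually flushed, and it treats the case where the send and receive queues share one CQ differently from the case where they use separate CQs. The following is an illustrative userspace sketch of that decision logic only, not driver code: struct sketch_cq, clear_armed(), notify() and flush_notify() are hypothetical names, and a plain bool stands in for the atomic CQ_ARMED bit.

/*
 * Illustrative sketch (assumed names, not driver code): models the
 * notification decision __flush_qp() makes after the change above.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_cq {
	const char *name;
	bool armed;		/* stands in for the CQ_ARMED bit */
};

/* Consume the armed flag, mirroring t4_clear_cq_armed() semantics. */
static bool clear_armed(struct sketch_cq *cq)
{
	bool was_armed = cq->armed;

	cq->armed = false;
	return was_armed;
}

static void notify(struct sketch_cq *cq)
{
	printf("completion upcall on %s\n", cq->name);
}

/* Decide which completion handlers to invoke after flushing a QP. */
static void flush_notify(struct sketch_cq *rchp, struct sketch_cq *schp,
			 bool rq_flushed, bool sq_flushed)
{
	if (schp == rchp) {
		/* Shared CQ: at most one upcall covers both queues. */
		if (clear_armed(rchp) && (rq_flushed || sq_flushed))
			notify(rchp);
	} else {
		/* Separate CQs: each is gated on its own armed flag. */
		if (clear_armed(rchp) && rq_flushed)
			notify(rchp);
		if (clear_armed(schp) && sq_flushed)
			notify(schp);
	}
}

int main(void)
{
	struct sketch_cq cq = { "shared cq", true };

	/* One upcall even though both queues flushed work. */
	flush_notify(&cq, &cq, true, true);
	return 0;
}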
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index df5edfa31a8f..c04e5134b30c 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -524,6 +524,10 @@ static inline int t4_wq_db_enabled(struct t4_wq *wq)
 	return !wq->rq.queue[wq->rq.size].status.db_off;
 }
 
+enum t4_cq_flags {
+	CQ_ARMED	= 1,
+};
+
 struct t4_cq {
 	struct t4_cqe *queue;
 	dma_addr_t dma_addr;
@@ -544,12 +548,19 @@ struct t4_cq {
 	u16 cidx_inc;
 	u8 gen;
 	u8 error;
+	unsigned long flags;
 };
 
+static inline int t4_clear_cq_armed(struct t4_cq *cq)
+{
+	return test_and_clear_bit(CQ_ARMED, &cq->flags);
+}
+
 static inline int t4_arm_cq(struct t4_cq *cq, int se)
 {
 	u32 val;
 
+	set_bit(CQ_ARMED, &cq->flags);
 	while (cq->cidx_inc > CIDXINC_MASK) {
 		val = SEINTARM(0) | CIDXINC(CIDXINC_MASK) | TIMERREG(7) |
 		      INGRESSQID(cq->cqid);
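The arm/clear pairing added to t4.h (set_bit(CQ_ARMED) in t4_arm_cq(), test_and_clear_bit() in t4_clear_cq_armed()) allows at most one notification per arming: whichever flush path wins the test-and-clear owns the upcall, and later callers see the bit already cleared. A minimal userspace analogue, assuming C11 atomics as a stand-in for the kernel bitops (all names below are hypothetical):

/*
 * Illustrative analogue only, not driver code: atomic_exchange() stands
 * in for test_and_clear_bit(), atomic_store() for set_bit().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct sketch_cq {
	atomic_bool armed;		/* stands in for the CQ_ARMED bit */
};

/* Mirrors the set_bit(CQ_ARMED, &cq->flags) added to t4_arm_cq(). */
static void arm_cq(struct sketch_cq *cq)
{
	atomic_store(&cq->armed, true);
}

/* Mirrors t4_clear_cq_armed(): true only for the caller that consumes the arm. */
static bool clear_cq_armed(struct sketch_cq *cq)
{
	return atomic_exchange(&cq->armed, false);
}

int main(void)
{
	struct sketch_cq cq;

	atomic_init(&cq.armed, false);
	arm_cq(&cq);
	printf("first clear:  %d\n", clear_cq_armed(&cq));	/* 1 -> deliver the upcall */
	printf("second clear: %d\n", clear_cq_armed(&cq));	/* 0 -> no duplicate upcall */
	return 0;
}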