author     Thomas Gleixner <tglx@linutronix.de>   2016-09-01 12:33:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>   2016-09-01 12:33:46 -0400
commit     0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree       41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /drivers/infiniband/hw/cxgb4
parent     aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent     3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'drivers/infiniband/hw/cxgb4')
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c   6
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c  10
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h   5
3 files changed, 15 insertions(+), 6 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 3aca7f6171b4..b6a953aed7e8 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1827,8 +1827,12 @@ static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 				(ep->mpa_pkt + sizeof(*mpa));
 			ep->ird = ntohs(mpa_v2_params->ird) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ird = min_t(u32, ep->ird,
+					cur_max_read_depth(ep->com.dev));
 			ep->ord = ntohs(mpa_v2_params->ord) &
 				MPA_V2_IRD_ORD_MASK;
+			ep->ord = min_t(u32, ep->ord,
+					cur_max_read_depth(ep->com.dev));
 			PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
 			     ep->ord);
 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
@@ -3136,7 +3140,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
 		if (conn_param->ord > ep->ird) {
 			if (RELAXED_IRD_NEGOTIATION) {
-				ep->ord = ep->ird;
+				conn_param->ord = ep->ird;
 			} else {
 				ep->ird = conn_param->ird;
 				ep->ord = conn_param->ord;
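
The cm.c hunks above cap the peer-advertised IRD/ORD at the device maximum (cur_max_read_depth()) and, under RELAXED_IRD_NEGOTIATION, shrink the ULP's requested ORD to the peer's IRD instead of rewriting the endpoint state. Below is a minimal userspace sketch of that clamping pattern; the device limit of 32 and the helper names are illustrative assumptions, not cxgb4 code.

#include <stdio.h>

/* Hypothetical stand-in for cur_max_read_depth(); the value is made up. */
static unsigned int dev_max_read_depth(void)
{
	return 32;
}

/* Clamp a peer-advertised IRD or ORD to what the device can support,
 * mirroring the min_t(u32, ...) calls added in process_mpa_request(). */
static unsigned int clamp_read_depth(unsigned int requested)
{
	unsigned int max = dev_max_read_depth();

	return requested < max ? requested : max;
}

int main(void)
{
	unsigned int peer_ird = 128;	/* peer asks for more than the device max */
	unsigned int peer_ord = 8;	/* already within the device max */

	printf("ird: %u -> %u\n", peer_ird, clamp_read_depth(peer_ird));
	printf("ord: %u -> %u\n", peer_ord, clamp_read_depth(peer_ord));
	return 0;
}
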
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 812ab7278b8e..ac926c942fee 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -1016,15 +1016,15 @@ int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
 int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 {
 	struct c4iw_cq *chp;
-	int ret;
+	int ret = 0;
 	unsigned long flag;
 
 	chp = to_c4iw_cq(ibcq);
 	spin_lock_irqsave(&chp->lock, flag);
-	ret = t4_arm_cq(&chp->cq,
-			(flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	t4_arm_cq(&chp->cq,
+		  (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
+	if (flags & IB_CQ_REPORT_MISSED_EVENTS)
+		ret = t4_cq_notempty(&chp->cq);
 	spin_unlock_irqrestore(&chp->lock, flag);
-	if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
-		ret = 0;
 	return ret;
 }
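
With this change c4iw_arm_cq() always arms the CQ, and its return value matters only when the caller passes IB_CQ_REPORT_MISSED_EVENTS: a positive return means entries are still pending (t4_cq_notempty(), added in t4.h below), so the consumer should poll again rather than wait for the next completion event. The following is a small userspace sketch of that contract; the toy types and function names stand in for the verbs API and are assumptions for illustration, not driver code.

#include <stdio.h>

#define REPORT_MISSED_EVENTS 0x4	/* models IB_CQ_REPORT_MISSED_EVENTS */

/* Toy CQ: 'pending' stands in for t4_cq_notempty() on the real queue. */
struct toy_cq {
	int pending;
};

/* Models the new c4iw_arm_cq() behaviour: arming is fire-and-forget, and a
 * positive return only reports "work may already be queued" when the caller
 * asked for missed-event reporting. */
static int toy_arm_cq(struct toy_cq *cq, int flags)
{
	int ret = 0;

	if (flags & REPORT_MISSED_EVENTS)
		ret = cq->pending != 0;
	return ret;
}

int main(void)
{
	struct toy_cq cq = { .pending = 1 };

	/* Canonical consumer loop: drain, re-arm, and poll again if the
	 * re-arm reports that completions slipped in meanwhile. */
	do {
		cq.pending = 0;			/* "poll" the queue empty */
	} while (toy_arm_cq(&cq, REPORT_MISSED_EVENTS) > 0);

	printf("armed with no missed events\n");
	return 0;
}
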
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 6126bbe36095..02173f4315fa 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -634,6 +634,11 @@ static inline int t4_valid_cqe(struct t4_cq *cq, struct t4_cqe *cqe)
 	return (CQE_GENBIT(cqe) == cq->gen);
 }
 
+static inline int t4_cq_notempty(struct t4_cq *cq)
+{
+	return cq->sw_in_use || t4_valid_cqe(cq, &cq->queue[cq->cidx]);
+}
+
 static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
 {
 	int ret;