author     Roland Dreier <rolandd@cisco.com>    2005-10-06 16:15:56 -0400
committer  Roland Dreier <rolandd@cisco.com>    2005-10-17 18:20:28 -0400
commit     90f104da22bbf2e2b8a2136c12fb4e013fccf504 (patch)
tree       d720f4b226cd72903ee878096707578ccc7abc0c /drivers/infiniband/hw/mthca
parent     116c0074ecfd6f061570856bec52b691d54dbd3c (diff)
[IB] mthca: SRQ limit reached events
Our hardware supports generating an event when the number of receives posted to a shared receive queue (SRQ) falls below a user-specified limit. Implement mthca_modify_srq() to arm the limit, and add code to handle dispatching SRQ events when they occur.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
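For reference, here is a minimal, illustrative sketch (not part of this patch) of how an in-kernel consumer could arm the SRQ limit through the generic ib_modify_srq() verb and react to the resulting IB_EVENT_SRQ_LIMIT_REACHED event. The my_* names and the receive-refill helper are hypothetical.

#include <rdma/ib_verbs.h>

/* Hypothetical helper that reposts receive WRs to the SRQ. */
void my_repost_receives(struct ib_srq *srq);

/* Async event handler registered via struct ib_srq_init_attr.event_handler. */
static void my_srq_event_handler(struct ib_event *event, void *context)
{
	if (event->event == IB_EVENT_SRQ_LIMIT_REACHED)
		my_repost_receives(event->element.srq);
}

/* Ask for an event once fewer than @limit receives remain posted. */
static int my_arm_srq_limit(struct ib_srq *srq, u32 limit)
{
	struct ib_srq_attr attr = {
		.srq_limit = limit,
	};

	/* The limit event is typically one-shot, so re-arm after each event. */
	return ib_modify_srq(srq, &attr, IB_SRQ_LIMIT);
}

Note that the mthca_modify_srq() added below returns -EINVAL for IB_SRQ_MAX_WR, since this driver does not support resizing SRQs, so only IB_SRQ_LIMIT should be passed on this hardware.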
Diffstat (limited to 'drivers/infiniband/hw/mthca')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h        2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_eq.c        21
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c   1
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_srq.c       25
4 files changed, 45 insertions, 4 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 67a2f324a274..6e18c128af42 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -448,6 +448,8 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
 int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
 		    struct ib_srq_attr *attr, struct mthca_srq *srq);
 void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq);
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		     enum ib_srq_attr_mask attr_mask);
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type);
 void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr);
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index c81fa8e975ef..f2afdc6c7e60 100644
--- a/drivers/infiniband/hw/mthca/mthca_eq.c
+++ b/drivers/infiniband/hw/mthca/mthca_eq.c
@@ -83,7 +83,8 @@ enum {
 	MTHCA_EVENT_TYPE_PATH_MIG	    = 0x01,
 	MTHCA_EVENT_TYPE_COMM_EST	    = 0x02,
 	MTHCA_EVENT_TYPE_SQ_DRAINED	    = 0x03,
-	MTHCA_EVENT_TYPE_SRQ_LAST_WQE	    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE    = 0x13,
+	MTHCA_EVENT_TYPE_SRQ_LIMIT	    = 0x14,
 	MTHCA_EVENT_TYPE_CQ_ERROR	    = 0x04,
 	MTHCA_EVENT_TYPE_WQ_CATAS_ERROR     = 0x05,
 	MTHCA_EVENT_TYPE_EEC_CATAS_ERROR    = 0x06,
@@ -110,8 +111,9 @@ enum {
 				(1ULL << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR)  | \
 				(1ULL << MTHCA_EVENT_TYPE_PORT_CHANGE)        | \
 				(1ULL << MTHCA_EVENT_TYPE_ECC_DETECT))
-#define MTHCA_SRQ_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \
-				(1ULL << MTHCA_EVENT_TYPE_SRQ_LAST_WQE)
+#define MTHCA_SRQ_EVENT_MASK   ((1ULL << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR)    | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE)    | \
+				(1ULL << MTHCA_EVENT_TYPE_SRQ_LIMIT))
 #define MTHCA_CMD_EVENT_MASK    (1ULL << MTHCA_EVENT_TYPE_CMD)
 
 #define MTHCA_EQ_DB_INC_CI     (1 << 24)
@@ -142,6 +144,9 @@ struct mthca_eqe {
 			__be32 qpn;
 		} __attribute__((packed)) qp;
 		struct {
+			__be32 srqn;
+		} __attribute__((packed)) srq;
+		struct {
 			__be32 cqn;
 			u32    reserved1;
 			u8     reserved2[3];
@@ -305,6 +310,16 @@ static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq)
 				       IB_EVENT_SQ_DRAINED);
 			break;
 
+		case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
+				       IB_EVENT_QP_LAST_WQE_REACHED);
+			break;
+
+		case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+			mthca_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
+					IB_EVENT_SRQ_LIMIT_REACHED);
+			break;
+
 		case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
 			mthca_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
 				       IB_EVENT_QP_FATAL);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 4e87ad115712..53b29a0841bf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1084,6 +1084,7 @@ int mthca_register_device(struct mthca_dev *dev)
 
 	if (dev->mthca_flags & MTHCA_FLAG_SRQ) {
 		dev->ib_dev.create_srq = mthca_create_srq;
+		dev->ib_dev.modify_srq = mthca_modify_srq;
 		dev->ib_dev.destroy_srq = mthca_destroy_srq;
 
 		if (mthca_is_memfree(dev))
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 18998d48c53e..13d2290261d9 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -332,6 +332,29 @@ void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
 	mthca_free_mailbox(dev, mailbox);
 }
 
+int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
+		     enum ib_srq_attr_mask attr_mask)
+{
+	struct mthca_dev *dev = to_mdev(ibsrq->device);
+	struct mthca_srq *srq = to_msrq(ibsrq);
+	int ret;
+	u8 status;
+
+	/* We don't support resizing SRQs (yet?) */
+	if (attr_mask & IB_SRQ_MAX_WR)
+		return -EINVAL;
+
+	if (attr_mask & IB_SRQ_LIMIT) {
+		ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status);
+		if (ret)
+			return ret;
+		if (status)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 		     enum ib_event_type event_type)
 {
@@ -354,7 +377,7 @@ void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
 
 	event.device      = &dev->ib_dev;
 	event.event       = event_type;
-	event.element.srq  = &srq->ibsrq;
+	event.element.srq = &srq->ibsrq;
 	srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);
 
 out: