aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRoland Dreier <rolandd@cisco.com>2007-05-07 00:02:48 -0400
committerRoland Dreier <rolandd@cisco.com>2007-05-07 00:18:11 -0400
commited23a72778f3dbd465e55b06fe31629e7e1dd2f3 (patch)
tree99ab8b4cf7c51ae64b4d3d9108e82b31db2b3465
parentf4fd0b224d60044d2da5ca02f8f2b5150c1d8731 (diff)
IB: Return "maybe missed event" hint from ib_req_notify_cq()
The semantics defined by the InfiniBand specification say that completion events are only generated when a completion is added to a completion queue (CQ) after completion notification is requested. In other words, this means that the following race is possible: while (CQ is not empty) ib_poll_cq(CQ); // new completion is added after while loop is exited ib_req_notify_cq(CQ); // no event is generated for the existing completion To close this race, the IB spec recommends doing another poll of the CQ after requesting notification. However, it is not always possible to arrange code this way (for example, we have found that NAPI for IPoIB cannot poll after requesting notification). Also, some hardware (eg Mellanox HCAs) actually will generate an event for completions added before the call to ib_req_notify_cq() -- which is allowed by the spec, since there's no way for any upper-layer consumer to know exactly when a completion was really added -- so the extra poll of the CQ is just a waste. Motivated by this, we add a new flag "IB_CQ_REPORT_MISSED_EVENTS" for ib_req_notify_cq() so that it can return a hint about whether a completion may have been added before the request for notification. The return value of ib_req_notify_cq() is extended so: < 0 means an error occurred while requesting notification == 0 means notification was requested successfully, and if IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events were missed and it is safe to wait for another event. > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed in. It means that the consumer must poll the CQ again to make sure it is empty to avoid the race described above. We add a flag to enable this behavior rather than turning it on unconditionally, because checking for missed events may incur significant overhead for some low-level drivers, and consumers that don't care about the results of this test shouldn't be forced to pay for the test. Signed-off-by: Roland Dreier <rolandd@cisco.com>
-rw-r--r--drivers/infiniband/hw/amso1100/c2.h2
-rw-r--r--drivers/infiniband/hw/amso1100/c2_cq.c16
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c3
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c8
-rw-r--r--drivers/infiniband/hw/ehca/ehca_iverbs.h2
-rw-r--r--drivers/infiniband/hw/ehca/ehca_reqs.c14
-rw-r--r--drivers/infiniband/hw/ehca/ipz_pt_fn.h8
-rw-r--r--drivers/infiniband/hw/ipath/ipath_cq.c15
-rw-r--r--drivers/infiniband/hw/ipath/ipath_verbs.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h4
-rw-r--r--include/rdma/ib_verbs.h40
12 files changed, 93 insertions, 33 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 04a9db5de881..fa58200217a1 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -519,7 +519,7 @@ extern void c2_free_cq(struct c2_dev *c2dev, struct c2_cq *cq);
519extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index); 519extern void c2_cq_event(struct c2_dev *c2dev, u32 mq_index);
520extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index); 520extern void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index);
521extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry); 521extern int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
522extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); 522extern int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
523 523
524/* CM */ 524/* CM */
525extern int c2_llp_connect(struct iw_cm_id *cm_id, 525extern int c2_llp_connect(struct iw_cm_id *cm_id,
diff --git a/drivers/infiniband/hw/amso1100/c2_cq.c b/drivers/infiniband/hw/amso1100/c2_cq.c
index 5175c99ee586..d2b3366786d6 100644
--- a/drivers/infiniband/hw/amso1100/c2_cq.c
+++ b/drivers/infiniband/hw/amso1100/c2_cq.c
@@ -217,17 +217,19 @@ int c2_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
217 return npolled; 217 return npolled;
218} 218}
219 219
220int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 220int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
221{ 221{
222 struct c2_mq_shared __iomem *shared; 222 struct c2_mq_shared __iomem *shared;
223 struct c2_cq *cq; 223 struct c2_cq *cq;
224 unsigned long flags;
225 int ret = 0;
224 226
225 cq = to_c2cq(ibcq); 227 cq = to_c2cq(ibcq);
226 shared = cq->mq.peer; 228 shared = cq->mq.peer;
227 229
228 if (notify == IB_CQ_NEXT_COMP) 230 if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_NEXT_COMP)
229 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type); 231 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT, &shared->notification_type);
230 else if (notify == IB_CQ_SOLICITED) 232 else if ((notify_flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
231 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type); 233 writeb(C2_CQ_NOTIFICATION_TYPE_NEXT_SE, &shared->notification_type);
232 else 234 else
233 return -EINVAL; 235 return -EINVAL;
@@ -241,7 +243,13 @@ int c2_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
241 */ 243 */
242 readb(&shared->armed); 244 readb(&shared->armed);
243 245
244 return 0; 246 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
247 spin_lock_irqsave(&cq->lock, flags);
248 ret = !c2_mq_empty(&cq->mq);
249 spin_unlock_irqrestore(&cq->lock, flags);
250 }
251
252 return ret;
245} 253}
246 254
247static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) 255static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index f5e9aeec6f6e..76049afc7655 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -114,7 +114,10 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
114 return -EIO; 114 return -EIO;
115 } 115 }
116 } 116 }
117
118 return 1;
117 } 119 }
120
118 return 0; 121 return 0;
119} 122}
120 123
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 78a495f5332e..a891493fd340 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -292,7 +292,7 @@ static int iwch_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
292#endif 292#endif
293} 293}
294 294
295static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 295static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
296{ 296{
297 struct iwch_dev *rhp; 297 struct iwch_dev *rhp;
298 struct iwch_cq *chp; 298 struct iwch_cq *chp;
@@ -303,7 +303,7 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
303 303
304 chp = to_iwch_cq(ibcq); 304 chp = to_iwch_cq(ibcq);
305 rhp = chp->rhp; 305 rhp = chp->rhp;
306 if (notify == IB_CQ_SOLICITED) 306 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
307 cq_op = CQ_ARM_SE; 307 cq_op = CQ_ARM_SE;
308 else 308 else
309 cq_op = CQ_ARM_AN; 309 cq_op = CQ_ARM_AN;
@@ -317,9 +317,11 @@ static int iwch_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
317 PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr); 317 PDBG("%s rptr 0x%x\n", __FUNCTION__, chp->cq.rptr);
318 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0); 318 err = cxio_hal_cq_op(&rhp->rdev, &chp->cq, cq_op, 0);
319 spin_unlock_irqrestore(&chp->lock, flag); 319 spin_unlock_irqrestore(&chp->lock, flag);
320 if (err) 320 if (err < 0)
321 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err, 321 printk(KERN_ERR MOD "Error %d rearming CQID 0x%x\n", err,
322 chp->cq.cqid); 322 chp->cq.cqid);
323 if (err > 0 && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
324 err = 0;
323 return err; 325 return err;
324} 326}
325 327
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h
index aff96ac4fd12..e14b029332c8 100644
--- a/drivers/infiniband/hw/ehca/ehca_iverbs.h
+++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h
@@ -135,7 +135,7 @@ int ehca_poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc);
135 135
136int ehca_peek_cq(struct ib_cq *cq, int wc_cnt); 136int ehca_peek_cq(struct ib_cq *cq, int wc_cnt);
137 137
138int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify); 138int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags);
139 139
140struct ib_qp *ehca_create_qp(struct ib_pd *pd, 140struct ib_qp *ehca_create_qp(struct ib_pd *pd,
141 struct ib_qp_init_attr *init_attr, 141 struct ib_qp_init_attr *init_attr,
diff --git a/drivers/infiniband/hw/ehca/ehca_reqs.c b/drivers/infiniband/hw/ehca/ehca_reqs.c
index 08d3f892d9f3..caec9dee09e1 100644
--- a/drivers/infiniband/hw/ehca/ehca_reqs.c
+++ b/drivers/infiniband/hw/ehca/ehca_reqs.c
@@ -634,11 +634,13 @@ poll_cq_exit0:
634 return ret; 634 return ret;
635} 635}
636 636
637int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify) 637int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify_flags notify_flags)
638{ 638{
639 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); 639 struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq);
640 unsigned long spl_flags;
641 int ret = 0;
640 642
641 switch (cq_notify) { 643 switch (notify_flags & IB_CQ_SOLICITED_MASK) {
642 case IB_CQ_SOLICITED: 644 case IB_CQ_SOLICITED:
643 hipz_set_cqx_n0(my_cq, 1); 645 hipz_set_cqx_n0(my_cq, 1);
644 break; 646 break;
@@ -649,5 +651,11 @@ int ehca_req_notify_cq(struct ib_cq *cq, enum ib_cq_notify cq_notify)
649 return -EINVAL; 651 return -EINVAL;
650 } 652 }
651 653
652 return 0; 654 if (notify_flags & IB_CQ_REPORT_MISSED_EVENTS) {
655 spin_lock_irqsave(&my_cq->spinlock, spl_flags);
656 ret = ipz_qeit_is_valid(&my_cq->ipz_queue);
657 spin_unlock_irqrestore(&my_cq->spinlock, spl_flags);
658 }
659
660 return ret;
653} 661}
diff --git a/drivers/infiniband/hw/ehca/ipz_pt_fn.h b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
index 8199c45768a3..57f141a36bce 100644
--- a/drivers/infiniband/hw/ehca/ipz_pt_fn.h
+++ b/drivers/infiniband/hw/ehca/ipz_pt_fn.h
@@ -140,6 +140,14 @@ static inline void *ipz_qeit_get_inc_valid(struct ipz_queue *queue)
140 return cqe; 140 return cqe;
141} 141}
142 142
143static inline int ipz_qeit_is_valid(struct ipz_queue *queue)
144{
145 struct ehca_cqe *cqe = ipz_qeit_get(queue);
146 u32 cqe_flags = cqe->cqe_flags;
147
148 return cqe_flags >> 7 == (queue->toggle_state & 1);
149}
150
143/* 151/*
144 * returns and resets Queue Entry iterator 152 * returns and resets Queue Entry iterator
145 * returns address (kv) of first Queue Entry 153 * returns address (kv) of first Queue Entry
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 00d3eb9bc696..3e9241badba0 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -334,17 +334,18 @@ int ipath_destroy_cq(struct ib_cq *ibcq)
334/** 334/**
335 * ipath_req_notify_cq - change the notification type for a completion queue 335 * ipath_req_notify_cq - change the notification type for a completion queue
336 * @ibcq: the completion queue 336 * @ibcq: the completion queue
337 * @notify: the type of notification to request 337 * @notify_flags: the type of notification to request
338 * 338 *
339 * Returns 0 for success. 339 * Returns 0 for success.
340 * 340 *
341 * This may be called from interrupt context. Also called by 341 * This may be called from interrupt context. Also called by
342 * ib_req_notify_cq() in the generic verbs code. 342 * ib_req_notify_cq() in the generic verbs code.
343 */ 343 */
344int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 344int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags)
345{ 345{
346 struct ipath_cq *cq = to_icq(ibcq); 346 struct ipath_cq *cq = to_icq(ibcq);
347 unsigned long flags; 347 unsigned long flags;
348 int ret = 0;
348 349
349 spin_lock_irqsave(&cq->lock, flags); 350 spin_lock_irqsave(&cq->lock, flags);
350 /* 351 /*
@@ -352,9 +353,15 @@ int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
352 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2). 353 * any other transitions (see C11-31 and C11-32 in ch. 11.4.2.2).
353 */ 354 */
354 if (cq->notify != IB_CQ_NEXT_COMP) 355 if (cq->notify != IB_CQ_NEXT_COMP)
355 cq->notify = notify; 356 cq->notify = notify_flags & IB_CQ_SOLICITED_MASK;
357
358 if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
359 cq->queue->head != cq->queue->tail)
360 ret = 1;
361
356 spin_unlock_irqrestore(&cq->lock, flags); 362 spin_unlock_irqrestore(&cq->lock, flags);
357 return 0; 363
364 return ret;
358} 365}
359 366
360/** 367/**
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index 2d734fb6eff7..7064fc222727 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -741,7 +741,7 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries, int comp_vec
741 741
742int ipath_destroy_cq(struct ib_cq *ibcq); 742int ipath_destroy_cq(struct ib_cq *ibcq);
743 743
744int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify notify); 744int ipath_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_flags);
745 745
746int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata); 746int ipath_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata);
747 747
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index efd79ef109a6..cf0868f6e965 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -726,11 +726,12 @@ repoll:
726 return err == 0 || err == -EAGAIN ? npolled : err; 726 return err == 0 || err == -EAGAIN ? npolled : err;
727} 727}
728 728
729int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) 729int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags)
730{ 730{
731 __be32 doorbell[2]; 731 __be32 doorbell[2];
732 732
733 doorbell[0] = cpu_to_be32((notify == IB_CQ_SOLICITED ? 733 doorbell[0] = cpu_to_be32(((flags & IB_CQ_SOLICITED_MASK) ==
734 IB_CQ_SOLICITED ?
734 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : 735 MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL :
735 MTHCA_TAVOR_CQ_DB_REQ_NOT) | 736 MTHCA_TAVOR_CQ_DB_REQ_NOT) |
736 to_mcq(cq)->cqn); 737 to_mcq(cq)->cqn);
@@ -743,7 +744,7 @@ int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify)
743 return 0; 744 return 0;
744} 745}
745 746
746int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) 747int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
747{ 748{
748 struct mthca_cq *cq = to_mcq(ibcq); 749 struct mthca_cq *cq = to_mcq(ibcq);
749 __be32 doorbell[2]; 750 __be32 doorbell[2];
@@ -755,7 +756,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
755 756
756 doorbell[0] = ci; 757 doorbell[0] = ci;
757 doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) | 758 doorbell[1] = cpu_to_be32((cq->cqn << 8) | (2 << 5) | (sn << 3) |
758 (notify == IB_CQ_SOLICITED ? 1 : 2)); 759 ((flags & IB_CQ_SOLICITED_MASK) ==
760 IB_CQ_SOLICITED ? 1 : 2));
759 761
760 mthca_write_db_rec(doorbell, cq->arm_db); 762 mthca_write_db_rec(doorbell, cq->arm_db);
761 763
@@ -766,7 +768,7 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
766 wmb(); 768 wmb();
767 769
768 doorbell[0] = cpu_to_be32((sn << 28) | 770 doorbell[0] = cpu_to_be32((sn << 28) |
769 (notify == IB_CQ_SOLICITED ? 771 ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
770 MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : 772 MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL :
771 MTHCA_ARBEL_CQ_DB_REQ_NOT) | 773 MTHCA_ARBEL_CQ_DB_REQ_NOT) |
772 cq->cqn); 774 cq->cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index b7e42efaf43d..9bae3cc60603 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -495,8 +495,8 @@ void mthca_unmap_eq_icm(struct mthca_dev *dev);
495 495
496int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, 496int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
497 struct ib_wc *entry); 497 struct ib_wc *entry);
498int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); 498int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
499int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); 499int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify_flags flags);
500int mthca_init_cq(struct mthca_dev *dev, int nent, 500int mthca_init_cq(struct mthca_dev *dev, int nent,
501 struct mthca_ucontext *ctx, u32 pdn, 501 struct mthca_ucontext *ctx, u32 pdn,
502 struct mthca_cq *cq); 502 struct mthca_cq *cq);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 17cc309d03ef..5342ac64ed1a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -431,9 +431,11 @@ struct ib_wc {
431 u8 port_num; /* valid only for DR SMPs on switches */ 431 u8 port_num; /* valid only for DR SMPs on switches */
432}; 432};
433 433
434enum ib_cq_notify { 434enum ib_cq_notify_flags {
435 IB_CQ_SOLICITED, 435 IB_CQ_SOLICITED = 1 << 0,
436 IB_CQ_NEXT_COMP 436 IB_CQ_NEXT_COMP = 1 << 1,
437 IB_CQ_SOLICITED_MASK = IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
438 IB_CQ_REPORT_MISSED_EVENTS = 1 << 2,
437}; 439};
438 440
439enum ib_srq_attr_mask { 441enum ib_srq_attr_mask {
@@ -990,7 +992,7 @@ struct ib_device {
990 struct ib_wc *wc); 992 struct ib_wc *wc);
991 int (*peek_cq)(struct ib_cq *cq, int wc_cnt); 993 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
992 int (*req_notify_cq)(struct ib_cq *cq, 994 int (*req_notify_cq)(struct ib_cq *cq,
993 enum ib_cq_notify cq_notify); 995 enum ib_cq_notify_flags flags);
994 int (*req_ncomp_notif)(struct ib_cq *cq, 996 int (*req_ncomp_notif)(struct ib_cq *cq,
995 int wc_cnt); 997 int wc_cnt);
996 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, 998 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
@@ -1419,14 +1421,34 @@ int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1419/** 1421/**
1420 * ib_req_notify_cq - Request completion notification on a CQ. 1422 * ib_req_notify_cq - Request completion notification on a CQ.
1421 * @cq: The CQ to generate an event for. 1423 * @cq: The CQ to generate an event for.
1422 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will 1424 * @flags:
1423 * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP, 1425 * Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
1424 * notification will occur on the next completion. 1426 * to request an event on the next solicited event or next work
1427 * completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
1428 * may also be |ed in to request a hint about missed events, as
1429 * described below.
1430 *
1431 * Return Value:
1432 * < 0 means an error occurred while requesting notification
1433 * == 0 means notification was requested successfully, and if
1434 * IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
1435 * were missed and it is safe to wait for another event. In
1436 * this case is it guaranteed that any work completions added
1437 * to the CQ since the last CQ poll will trigger a completion
1438 * notification event.
1439 * > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
1440 * in. It means that the consumer must poll the CQ again to
1441 * make sure it is empty to avoid missing an event because of a
1442 * race between requesting notification and an entry being
1443 * added to the CQ. This return value means it is possible
1444 * (but not guaranteed) that a work completion has been added
1445 * to the CQ since the last poll without triggering a
1446 * completion notification event.
1425 */ 1447 */
1426static inline int ib_req_notify_cq(struct ib_cq *cq, 1448static inline int ib_req_notify_cq(struct ib_cq *cq,
1427 enum ib_cq_notify cq_notify) 1449 enum ib_cq_notify_flags flags)
1428{ 1450{
1429 return cq->device->req_notify_cq(cq, cq_notify); 1451 return cq->device->req_notify_cq(cq, flags);
1430} 1452}
1431 1453
1432/** 1454/**