author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:18:21 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-07 15:18:21 -0400
commit		972d45fb43f0f0793fa275c4a22998106760cd61
tree		f80ac6698044b179bf3fb9d686bd33083033ccb5 /include/rdma
parent		5b6b54982258c330247957a8d877b9851ac69d53
parent		8d1cc86a6278687efbab7b8c294ab01efe4d4231
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Convert to NAPI
  IB: Return "maybe missed event" hint from ib_req_notify_cq()
  IB: Add CQ comp_vector support
  IB/ipath: Fix a race condition when generating ACKs
  IB/ipath: Fix two more spin lock problems
  IB/fmr_pool: Add prefix to all printks
  IB/srp: Set proc_name
  IB/srp: Add orig_dgid sysfs attribute to scsi_host
  IPoIB/cm: Don't crash if remote side uses one QP for both directions
  RDMA/cxgb3: Support for new abort logic
  RDMA/cxgb3: Initialize cpu_idx field in cpl_close_listserv_req message
  RDMA/cxgb3: Fail qp creation if the requested max_inline is too large
  RDMA/cxgb3: Fix TERM codes
  IPoIB/cm: Fix error handling in ipoib_cm_dev_open()
  IB/ipath: Don't corrupt pending mmap list when unmapped objects are freed
  IB/mthca: Work around kernel QP starvation
  IB/ipath: Don't put QP in timeout queue if waiting to send
  IB/ipath: Don't call spin_lock_irq() from interrupt context
Diffstat (limited to 'include/rdma')
-rw-r--r--	include/rdma/ib_verbs.h	47
1 files changed, 37 insertions, 10 deletions
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 765589f4d166..5342ac64ed1a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -431,9 +431,11 @@ struct ib_wc {
 	u8			port_num;	/* valid only for DR SMPs on switches */
 };
 
-enum ib_cq_notify {
-	IB_CQ_SOLICITED,
-	IB_CQ_NEXT_COMP
+enum ib_cq_notify_flags {
+	IB_CQ_SOLICITED			= 1 << 0,
+	IB_CQ_NEXT_COMP			= 1 << 1,
+	IB_CQ_SOLICITED_MASK		= IB_CQ_SOLICITED | IB_CQ_NEXT_COMP,
+	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
 };
 
 enum ib_srq_attr_mask {
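Since the notify constants are now distinct bits, a consumer can OR the missed-events hint into either arming mode. A minimal consumer-side sketch (not part of this patch; cq is assumed to be an already-created struct ib_cq, and handle_error() is a hypothetical error path):

	/* Arm for the next solicited completion and ask for the
	 * missed-event hint in the same call; the enum values are
	 * now single bits, so they combine with |.
	 */
	int ret = ib_req_notify_cq(cq, IB_CQ_SOLICITED |
				   IB_CQ_REPORT_MISSED_EVENTS);
	if (ret < 0)
		handle_error(ret);	/* hypothetical */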
@@ -912,6 +914,8 @@ struct ib_device {
 
 	u32			     flags;
 
+	int			     num_comp_vectors;
+
 	struct iw_cm_verbs	     *iwcm;
 
 	int		           (*query_device)(struct ib_device *device,
@@ -978,6 +982,7 @@ struct ib_device {
 						   struct ib_recv_wr *recv_wr,
 						   struct ib_recv_wr **bad_recv_wr);
 	struct ib_cq *             (*create_cq)(struct ib_device *device, int cqe,
+						int comp_vector,
 						struct ib_ucontext *context,
 						struct ib_udata *udata);
 	int                        (*destroy_cq)(struct ib_cq *cq);
@@ -987,7 +992,7 @@ struct ib_device {
 					      struct ib_wc *wc);
 	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
 	int                        (*req_notify_cq)(struct ib_cq *cq,
-						    enum ib_cq_notify cq_notify);
+						    enum ib_cq_notify_flags flags);
 	int                        (*req_ncomp_notif)(struct ib_cq *cq,
 						      int wc_cnt);
 	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
@@ -1358,13 +1363,15 @@ static inline int ib_post_recv(struct ib_qp *qp,
  * @cq_context: Context associated with the CQ returned to the user via
  *   the associated completion and event handlers.
  * @cqe: The minimum size of the CQ.
+ * @comp_vector - Completion vector used to signal completion events.
+ *     Must be >= 0 and < context->num_comp_vectors.
  *
  * Users can examine the cq structure to determine the actual CQ size.
  */
 struct ib_cq *ib_create_cq(struct ib_device *device,
 			   ib_comp_handler comp_handler,
 			   void (*event_handler)(struct ib_event *, void *),
-			   void *cq_context, int cqe);
+			   void *cq_context, int cqe, int comp_vector);
 
 /**
  * ib_resize_cq - Modifies the capacity of the CQ.
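The extra parameter lets a caller choose which completion vector delivers the CQ's events; valid indices run from 0 to num_comp_vectors - 1 on the device. A hedged sketch of a caller under the new signature, where my_comp_handler, my_cq_context, and the distribution index i are hypothetical:

	/* Pick a vector in [0, num_comp_vectors) and create a
	 * 256-entry CQ on it; i is whatever distribution policy
	 * the consumer uses (e.g. per-CPU round robin).
	 */
	int vector = i % device->num_comp_vectors;
	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
					my_cq_context, 256, vector);

	if (IS_ERR(cq))
		return PTR_ERR(cq);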
@@ -1414,14 +1421,34 @@ int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
 /**
  * ib_req_notify_cq - Request completion notification on a CQ.
  * @cq: The CQ to generate an event for.
- * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
- *   occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
- *   notification will occur on the next completion.
+ * @flags:
+ *   Must contain exactly one of %IB_CQ_SOLICITED or %IB_CQ_NEXT_COMP
+ *   to request an event on the next solicited event or next work
+ *   completion at any type, respectively. %IB_CQ_REPORT_MISSED_EVENTS
+ *   may also be |ed in to request a hint about missed events, as
+ *   described below.
+ *
+ * Return Value:
+ *    < 0 means an error occurred while requesting notification
+ *   == 0 means notification was requested successfully, and if
+ *        IB_CQ_REPORT_MISSED_EVENTS was passed in, then no events
+ *        were missed and it is safe to wait for another event.  In
+ *        this case is it guaranteed that any work completions added
+ *        to the CQ since the last CQ poll will trigger a completion
+ *        notification event.
+ *    > 0 is only returned if IB_CQ_REPORT_MISSED_EVENTS was passed
+ *        in.  It means that the consumer must poll the CQ again to
+ *        make sure it is empty to avoid missing an event because of a
+ *        race between requesting notification and an entry being
+ *        added to the CQ.  This return value means it is possible
+ *        (but not guaranteed) that a work completion has been added
+ *        to the CQ since the last poll without triggering a
+ *        completion notification event.
  */
 static inline int ib_req_notify_cq(struct ib_cq *cq,
-				   enum ib_cq_notify cq_notify)
+				   enum ib_cq_notify_flags flags)
 {
-	return cq->device->req_notify_cq(cq, flags);
+	return cq->device->req_notify_cq(cq, flags);
 }
 
 /**
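The missed-events hint is what lets a NAPI-style consumer (such as the IPoIB conversion in this merge) re-arm notification without losing completions. A sketch of the intended drain-then-re-arm loop, with a hypothetical handle_wc() consumer function:

	/* Drain the CQ, then re-arm.  A positive return from
	 * ib_req_notify_cq() means a completion may have arrived
	 * between the final poll and the re-arm, so poll again
	 * rather than going to sleep.
	 */
	struct ib_wc wc;

	do {
		while (ib_poll_cq(cq, 1, &wc) > 0)
			handle_wc(&wc);		/* hypothetical */
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);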