author     Linus Torvalds <torvalds@linux-foundation.org>  2011-11-01 13:51:38 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-11-01 13:51:38 -0400
commit     f470f8d4e702593ee1d0852871ad80373bce707b (patch)
tree       85a67e65c5e5b9777639bd8f4c763a4cf8787e0e  /include/rdma
parent     dc47d3810cdcb4f32bfa31d50f26af97aced0638 (diff)
parent     504255f8d0480cf293962adf4bc3aecac645ae71 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (62 commits)
  mlx4_core: Deprecate log_num_vlan module param
  IB/mlx4: Don't set VLAN in IBoE WQEs' control segment
  IB/mlx4: Enable 4K mtu for IBoE
  RDMA/cxgb4: Mark QP in error before disabling the queue in firmware
  RDMA/cxgb4: Serialize calls to CQ's comp_handler
  RDMA/cxgb3: Serialize calls to CQ's comp_handler
  IB/qib: Fix issue with link states and QSFP cables
  IB/mlx4: Configure extended active speeds
  mlx4_core: Add extended port capabilities support
  IB/qib: Hold links until tuning data is available
  IB/qib: Clean up checkpatch issue
  IB/qib: Remove s_lock around header validation
  IB/qib: Precompute timeout jiffies to optimize latency
  IB/qib: Use RCU for qpn lookup
  IB/qib: Eliminate divide/mod in converting idx to egr buf pointer
  IB/qib: Decode path MTU optimization
  IB/qib: Optimize RC/UC code by IB operation
  IPoIB: Use the right function to do DMA unmap pages
  RDMA/cxgb4: Use correct QID in insert_recv_cqe()
  RDMA/cxgb4: Make sure flush CQ entries are collected on connection close
  ...
Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_user_verbs.h    48
-rw-r--r--  include/rdma/ib_verbs.h        106
-rw-r--r--  include/rdma/iw_cm.h             4
-rw-r--r--  include/rdma/rdma_cm.h           1
-rw-r--r--  include/rdma/rdma_user_cm.h      3
5 files changed, 156 insertions, 6 deletions
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index fe5b05177a2c..81aba3a73aa3 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -81,7 +81,11 @@ enum {
 	IB_USER_VERBS_CMD_MODIFY_SRQ,
 	IB_USER_VERBS_CMD_QUERY_SRQ,
 	IB_USER_VERBS_CMD_DESTROY_SRQ,
-	IB_USER_VERBS_CMD_POST_SRQ_RECV
+	IB_USER_VERBS_CMD_POST_SRQ_RECV,
+	IB_USER_VERBS_CMD_OPEN_XRCD,
+	IB_USER_VERBS_CMD_CLOSE_XRCD,
+	IB_USER_VERBS_CMD_CREATE_XSRQ,
+	IB_USER_VERBS_CMD_OPEN_QP
 };
 
 /*
@@ -222,6 +226,21 @@ struct ib_uverbs_dealloc_pd {
 	__u32 pd_handle;
 };
 
+struct ib_uverbs_open_xrcd {
+	__u64 response;
+	__u32 fd;
+	__u32 oflags;
+	__u64 driver_data[0];
+};
+
+struct ib_uverbs_open_xrcd_resp {
+	__u32 xrcd_handle;
+};
+
+struct ib_uverbs_close_xrcd {
+	__u32 xrcd_handle;
+};
+
 struct ib_uverbs_reg_mr {
 	__u64 response;
 	__u64 start;
@@ -404,6 +423,17 @@ struct ib_uverbs_create_qp {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_open_qp {
+	__u64 response;
+	__u64 user_handle;
+	__u32 pd_handle;
+	__u32 qpn;
+	__u8  qp_type;
+	__u8  reserved[7];
+	__u64 driver_data[0];
+};
+
+/* also used for open response */
 struct ib_uverbs_create_qp_resp {
 	__u32 qp_handle;
 	__u32 qpn;
@@ -648,11 +678,25 @@ struct ib_uverbs_create_srq {
 	__u64 driver_data[0];
 };
 
+struct ib_uverbs_create_xsrq {
+	__u64 response;
+	__u64 user_handle;
+	__u32 srq_type;
+	__u32 pd_handle;
+	__u32 max_wr;
+	__u32 max_sge;
+	__u32 srq_limit;
+	__u32 reserved;
+	__u32 xrcd_handle;
+	__u32 cq_handle;
+	__u64 driver_data[0];
+};
+
 struct ib_uverbs_create_srq_resp {
 	__u32 srq_handle;
 	__u32 max_wr;
 	__u32 max_sge;
-	__u32 reserved;
+	__u32 srqn;
 };
 
 struct ib_uverbs_modify_srq {
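
The four commands added above (OPEN_XRCD, CLOSE_XRCD, CREATE_XSRQ, OPEN_QP) extend the existing write()-based uverbs ABI, in which each request begins with struct ib_uverbs_cmd_hdr (defined earlier in this header) followed by the command struct. The sketch below is a rough, hypothetical illustration of how a userspace verbs library might issue the new OPEN_XRCD command; cmd_fd (an open uverbs device) and xrcd_fd (the file that names the XRC domain) are assumptions, and error handling is minimal.

/* Hypothetical userspace sketch -- not part of this merge. */
#include <stdint.h>
#include <unistd.h>
#include <rdma/ib_user_verbs.h>

struct open_xrcd_msg {
	struct ib_uverbs_cmd_hdr   hdr;	/* command number + sizes in 32-bit words */
	struct ib_uverbs_open_xrcd cmd;	/* new request layout from this diff */
};

static int open_xrcd(int cmd_fd, int xrcd_fd, uint32_t oflags,
		     uint32_t *xrcd_handle)
{
	struct ib_uverbs_open_xrcd_resp resp;
	struct open_xrcd_msg msg = {
		.hdr = {
			.command   = IB_USER_VERBS_CMD_OPEN_XRCD,
			.in_words  = sizeof(msg) / 4,
			.out_words = sizeof(resp) / 4,
		},
		.cmd = {
			.response = (uintptr_t) &resp,	/* kernel writes the reply here */
			.fd       = xrcd_fd,
			.oflags   = oflags,		/* e.g. O_CREAT */
		},
	};

	if (write(cmd_fd, &msg, sizeof(msg)) != (ssize_t) sizeof(msg))
		return -1;

	*xrcd_handle = resp.xrcd_handle;
	return 0;
}

The returned xrcd_handle is what ib_uverbs_close_xrcd and ib_uverbs_create_xsrq refer back to.
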
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 228be3e220d9..bf5daafe8ecc 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -112,6 +112,7 @@ enum ib_device_cap_flags {
 	 */
 	IB_DEVICE_UD_IP_CSUM		= (1<<18),
 	IB_DEVICE_UD_TSO		= (1<<19),
+	IB_DEVICE_XRC			= (1<<20),
 	IB_DEVICE_MEM_MGT_EXTENSIONS	= (1<<21),
 	IB_DEVICE_BLOCK_MULTICAST_LOOPBACK = (1<<22),
 };
@@ -207,6 +208,7 @@ enum ib_port_cap_flags {
 	IB_PORT_SM_DISABLED			= 1 << 10,
 	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
 	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
+	IB_PORT_EXTENDED_SPEEDS_SUP		= 1 << 14,
 	IB_PORT_CM_SUP				= 1 << 16,
 	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
 	IB_PORT_REINIT_SUP			= 1 << 18,
@@ -415,7 +417,15 @@ enum ib_rate {
 	IB_RATE_40_GBPS  = 7,
 	IB_RATE_60_GBPS  = 8,
 	IB_RATE_80_GBPS  = 9,
-	IB_RATE_120_GBPS = 10
+	IB_RATE_120_GBPS = 10,
+	IB_RATE_14_GBPS  = 11,
+	IB_RATE_56_GBPS  = 12,
+	IB_RATE_112_GBPS = 13,
+	IB_RATE_168_GBPS = 14,
+	IB_RATE_25_GBPS  = 15,
+	IB_RATE_100_GBPS = 16,
+	IB_RATE_200_GBPS = 17,
+	IB_RATE_300_GBPS = 18
 };
 
 /**
@@ -427,6 +437,13 @@ enum ib_rate {
 int ib_rate_to_mult(enum ib_rate rate) __attribute_const__;
 
 /**
+ * ib_rate_to_mbps - Convert the IB rate enum to Mbps.
+ * For example, IB_RATE_2_5_GBPS will be converted to 2500.
+ * @rate: rate to convert.
+ */
+int ib_rate_to_mbps(enum ib_rate rate) __attribute_const__;
+
+/**
  * mult_to_ib_rate - Convert a multiple of 2.5 Gbit/sec to an IB rate
  * enum.
  * @mult: multiple to convert.
@@ -522,6 +539,11 @@ enum ib_cq_notify_flags {
 	IB_CQ_REPORT_MISSED_EVENTS	= 1 << 2,
 };
 
+enum ib_srq_type {
+	IB_SRQT_BASIC,
+	IB_SRQT_XRC
+};
+
 enum ib_srq_attr_mask {
 	IB_SRQ_MAX_WR	= 1 << 0,
 	IB_SRQ_LIMIT	= 1 << 1,
@@ -537,6 +559,14 @@ struct ib_srq_init_attr {
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *srq_context;
 	struct ib_srq_attr	attr;
+	enum ib_srq_type	srq_type;
+
+	union {
+		struct {
+			struct ib_xrcd *xrcd;
+			struct ib_cq   *cq;
+		} xrc;
+	} ext;
 };
 
 struct ib_qp_cap {
@@ -565,7 +595,11 @@ enum ib_qp_type {
 	IB_QPT_UC,
 	IB_QPT_UD,
 	IB_QPT_RAW_IPV6,
-	IB_QPT_RAW_ETHERTYPE
+	IB_QPT_RAW_ETHERTYPE,
+	/* Save 8 for RAW_PACKET */
+	IB_QPT_XRC_INI = 9,
+	IB_QPT_XRC_TGT,
+	IB_QPT_MAX
 };
 
 enum ib_qp_create_flags {
@@ -579,6 +613,7 @@ struct ib_qp_init_attr {
 	struct ib_cq	       *send_cq;
 	struct ib_cq	       *recv_cq;
 	struct ib_srq	       *srq;
+	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
 	struct ib_qp_cap	cap;
 	enum ib_sig_type	sq_sig_type;
 	enum ib_qp_type		qp_type;
@@ -586,6 +621,13 @@ struct ib_qp_init_attr {
 	u8			port_num; /* special QP types only */
 };
 
+struct ib_qp_open_attr {
+	void		      (*event_handler)(struct ib_event *, void *);
+	void		       *qp_context;
+	u32			qp_num;
+	enum ib_qp_type		qp_type;
+};
+
 enum ib_rnr_timeout {
 	IB_RNR_TIMER_655_36 = 0,
 	IB_RNR_TIMER_000_01 = 1,
@@ -770,6 +812,7 @@ struct ib_send_wr {
 			u32				rkey;
 		} fast_reg;
 	} wr;
+	u32			xrc_remote_srq_num;	/* XRC TGT QPs only */
 };
 
 struct ib_recv_wr {
@@ -831,6 +874,7 @@ struct ib_ucontext {
 	struct list_head	qp_list;
 	struct list_head	srq_list;
 	struct list_head	ah_list;
+	struct list_head	xrcd_list;
 	int			closing;
 };
 
@@ -858,6 +902,15 @@ struct ib_pd {
 	atomic_t          	usecnt; /* count all resources */
 };
 
+struct ib_xrcd {
+	struct ib_device       *device;
+	atomic_t		usecnt; /* count all exposed resources */
+	struct inode	       *inode;
+
+	struct mutex		tgt_qp_mutex;
+	struct list_head	tgt_qp_list;
+};
+
 struct ib_ah {
 	struct ib_device	*device;
 	struct ib_pd		*pd;
@@ -882,7 +935,16 @@ struct ib_srq {
 	struct ib_uobject      *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *srq_context;
+	enum ib_srq_type	srq_type;
 	atomic_t		usecnt;
+
+	union {
+		struct {
+			struct ib_xrcd *xrcd;
+			struct ib_cq   *cq;
+			u32		srq_num;
+		} xrc;
+	} ext;
 };
 
 struct ib_qp {
@@ -891,6 +953,11 @@ struct ib_qp {
 	struct ib_cq	       *send_cq;
 	struct ib_cq	       *recv_cq;
 	struct ib_srq	       *srq;
+	struct ib_xrcd	       *xrcd;	  /* XRC TGT QPs only */
+	struct list_head	xrcd_list;
+	atomic_t		usecnt;	  /* count times opened */
+	struct list_head	open_list;
+	struct ib_qp	       *real_qp;
 	struct ib_uobject      *uobject;
 	void		      (*event_handler)(struct ib_event *, void *);
 	void		       *qp_context;
@@ -1149,6 +1216,10 @@ struct ib_device {
 						  struct ib_grh *in_grh,
 						  struct ib_mad *in_mad,
 						  struct ib_mad *out_mad);
+	struct ib_xrcd *	   (*alloc_xrcd)(struct ib_device *device,
+						 struct ib_ucontext *ucontext,
+						 struct ib_udata *udata);
+	int			   (*dealloc_xrcd)(struct ib_xrcd *xrcd);
 
 	struct ib_dma_mapping_ops   *dma_ops;
 
@@ -1443,6 +1514,25 @@ int ib_query_qp(struct ib_qp *qp,
 int ib_destroy_qp(struct ib_qp *qp);
 
 /**
+ * ib_open_qp - Obtain a reference to an existing sharable QP.
+ * @xrcd - XRC domain
+ * @qp_open_attr: Attributes identifying the QP to open.
+ *
+ * Returns a reference to a sharable QP.
+ */
+struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
+			 struct ib_qp_open_attr *qp_open_attr);
+
+/**
+ * ib_close_qp - Release an external reference to a QP.
+ * @qp: The QP handle to release
+ *
+ * The opened QP handle is released by the caller.  The underlying
+ * shared QP is not destroyed until all internal references are released.
+ */
+int ib_close_qp(struct ib_qp *qp);
+
+/**
  * ib_post_send - Posts a list of work requests to the send queue of
  *   the specified QP.
  * @qp: The QP to post the work request on.
@@ -2060,4 +2150,16 @@ int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
  */
 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
 
+/**
+ * ib_alloc_xrcd - Allocates an XRC domain.
+ * @device: The device on which to allocate the XRC domain.
+ */
+struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device);
+
+/**
+ * ib_dealloc_xrcd - Deallocates an XRC domain.
+ * @xrcd: The XRC domain to deallocate.
+ */
+int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
+
 #endif /* IB_VERBS_H */
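
Taken together, the ib_verbs.h additions give kernel consumers a complete XRC path: allocate a domain, hang an XRC SRQ and an XRC target QP off it, and let other consumers attach via ib_open_qp()/ib_close_qp(). The following is a minimal, hypothetical sketch (not part of this merge) assuming an already-initialized device, PD and CQ, with error handling omitted.

/* Hypothetical kernel-side sketch -- not part of this merge. */
#include <rdma/ib_verbs.h>

static int xrc_tgt_setup_example(struct ib_device *dev, struct ib_pd *pd,
				 struct ib_cq *cq, struct ib_srq **srq_out,
				 struct ib_qp **qp_out)
{
	struct ib_srq_init_attr srq_attr = {};
	struct ib_qp_init_attr qp_attr = {};
	struct ib_xrcd *xrcd;

	/* The XRC domain is the sharing scope for XRC SRQs and target QPs. */
	xrcd = ib_alloc_xrcd(dev);

	/* An XRC SRQ names its domain and CQ through the new ext.xrc union. */
	srq_attr.srq_type	= IB_SRQT_XRC;
	srq_attr.ext.xrc.xrcd	= xrcd;
	srq_attr.ext.xrc.cq	= cq;
	srq_attr.attr.max_wr	= 128;
	srq_attr.attr.max_sge	= 1;
	*srq_out = ib_create_srq(pd, &srq_attr);

	/* XRC target QPs are owned by the domain rather than a PD or CQs. */
	qp_attr.qp_type = IB_QPT_XRC_TGT;
	qp_attr.xrcd	= xrcd;
	*qp_out = ib_create_qp(NULL, &qp_attr);

	return 0;
}

A second consumer in the same domain would then fill a struct ib_qp_open_attr with the target QP's number and IB_QPT_XRC_TGT and call ib_open_qp(xrcd, &attr); ib_close_qp() drops that reference, and the shared QP goes away only once ib_destroy_qp() runs and all openers have closed it. Teardown of the sketch above would be ib_destroy_qp(), ib_destroy_srq(), then ib_dealloc_xrcd().
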
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 2d0191c90f9e..1a046b1595cc 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -52,8 +52,10 @@ struct iw_cm_event {
 	struct sockaddr_in local_addr;
 	struct sockaddr_in remote_addr;
 	void *private_data;
-	u8  private_data_len;
 	void *provider_data;
+	u8 private_data_len;
+	u8 ord;
+	u8 ird;
 };
 
 /**
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 26977c149c41..51988f808181 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -65,6 +65,7 @@ enum rdma_cm_event_type {
 enum rdma_port_space {
 	RDMA_PS_SDP   = 0x0001,
 	RDMA_PS_IPOIB = 0x0002,
+	RDMA_PS_IB    = 0x013F,
 	RDMA_PS_TCP   = 0x0106,
 	RDMA_PS_UDP   = 0x0111,
 };
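
Unlike the TCP and UDP port spaces, RDMA_PS_IB leaves the choice of QP type to the caller. A minimal, hypothetical kernel-side sketch (not part of this merge), assuming the current four-argument rdma_create_id() that takes an explicit QP type:

/* Hypothetical kernel-side sketch -- not part of this merge. */
#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

/* Stub event handler added purely for illustration. */
static int example_cm_handler(struct rdma_cm_id *id,
			      struct rdma_cm_event *event)
{
	return 0;
}

static struct rdma_cm_id *example_create_ib_id(void *context)
{
	/* With RDMA_PS_IB the QP type is chosen by the caller (UD here). */
	return rdma_create_id(example_cm_handler, context,
			      RDMA_PS_IB, IB_QPT_UD);
}

The userspace counterpart is the qp_type byte added to struct rdma_ucm_create_id in the rdma_user_cm.h hunk below.
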
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
index fc82c1896f75..5348a000c8f3 100644
--- a/include/rdma/rdma_user_cm.h
+++ b/include/rdma/rdma_user_cm.h
@@ -77,7 +77,8 @@ struct rdma_ucm_create_id {
 	__u64 uid;
 	__u64 response;
 	__u16 ps;
-	__u8  reserved[6];
+	__u8  qp_type;
+	__u8  reserved[5];
 };
 
 struct rdma_ucm_create_id_resp {