aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-12-15 15:03:32 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2016-12-15 15:03:32 -0500
commit4d5b57e05a67c3cfd8e2b2a64ca356245a15b1c6 (patch)
treed8f3ea3bc3ccfe289f414bbe9a4bdd1e935d9228 /include
parent6df8b74b1720db1133ace0861cb6721bfe57819a (diff)
parent6f94ba20799b98c8badf047b184fb4cd7bc45e44 (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma
Pull rdma updates from Doug Ledford: "This is the complete update for the rdma stack for this release cycle. Most of it is typical driver and core updates, but there is the entirely new VMWare pvrdma driver. You may have noticed that there were changes in DaveM's pull request to the bnxt Ethernet driver to support a RoCE RDMA driver. The bnxt_re driver was tentatively set to be pulled in this release cycle, but it simply wasn't ready in time and was dropped (a few review comments still to address, and some multi-arch build issues like prefetch() not working across all arches). Summary: - shared mlx5 updates with net stack (will drop out on merge if Dave's tree has already been merged) - driver updates: cxgb4, hfi1, hns-roce, i40iw, mlx4, mlx5, qedr, rxe - debug cleanups - new connection rejection helpers - SRP updates - various misc fixes - new paravirt driver from vmware" * tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (210 commits) IB: Add vmw_pvrdma driver IB/mlx4: fix improper return value IB/ocrdma: fix bad initialization infiniband: nes: return value of skb_linearize should be handled MAINTAINERS: Update Intel RDMA RNIC driver maintainers MAINTAINERS: Remove Mitesh Ahuja from emulex maintainers IB/core: fix unmap_sg argument qede: fix general protection fault may occur on probe IB/mthca: Replace pci_pool_alloc by pci_pool_zalloc mlx5, calc_sq_size(): Make a debug message more informative mlx5: Remove a set-but-not-used variable mlx5: Use { } instead of { 0 } to init struct IB/srp: Make writing the add_target sysfs attr interruptible IB/srp: Make mapping failures easier to debug IB/srp: Make login failures easier to debug IB/srp: Introduce a local variable in srp_add_one() IB/srp: Fix CONFIG_DYNAMIC_DEBUG=n build IB/multicast: Check ib_find_pkey() return value IPoIB: Avoid reading an uninitialized member variable IB/mad: Fix an array index check ...
Diffstat (limited to 'include')
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/rdma/ib_cm.h6
-rw-r--r--include/rdma/ib_mad.h2
-rw-r--r--include/rdma/ib_verbs.h70
-rw-r--r--include/rdma/iw_cm.h6
-rw-r--r--include/rdma/opa_smi.h2
-rw-r--r--include/rdma/rdma_cm.h25
-rw-r--r--include/rdma/rdma_vt.h46
-rw-r--r--include/rdma/rdmavt_mr.h10
-rw-r--r--include/rdma/rdmavt_qp.h77
-rw-r--r--include/uapi/rdma/Kbuild2
-rw-r--r--include/uapi/rdma/hfi/hfi1_user.h2
-rw-r--r--include/uapi/rdma/hns-abi.h54
-rw-r--r--include/uapi/rdma/ib_user_verbs.h38
-rw-r--r--include/uapi/rdma/mlx5-abi.h38
-rw-r--r--include/uapi/rdma/rdma_user_cm.h12
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h289
18 files changed, 631 insertions, 51 deletions
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index a5f0fbedf1e7..57bec544e20a 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -577,7 +577,7 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
577 u8 self_lb_en_modifiable[0x1]; 577 u8 self_lb_en_modifiable[0x1];
578 u8 reserved_at_9[0x2]; 578 u8 reserved_at_9[0x2];
579 u8 max_lso_cap[0x5]; 579 u8 max_lso_cap[0x5];
580 u8 reserved_at_10[0x2]; 580 u8 multi_pkt_send_wqe[0x2];
581 u8 wqe_inline_mode[0x2]; 581 u8 wqe_inline_mode[0x2];
582 u8 rss_ind_tbl_cap[0x4]; 582 u8 rss_ind_tbl_cap[0x4];
583 u8 reg_umr_sq[0x1]; 583 u8 reg_umr_sq[0x1];
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a5e6c7bca610..abf4aa4691b2 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2253,6 +2253,7 @@
2253#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 2253#define PCI_DEVICE_ID_RASTEL_2PORT 0x2000
2254 2254
2255#define PCI_VENDOR_ID_VMWARE 0x15ad 2255#define PCI_VENDOR_ID_VMWARE 0x15ad
2256#define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0
2256 2257
2257#define PCI_VENDOR_ID_ZOLTRIX 0x15b0 2258#define PCI_VENDOR_ID_ZOLTRIX 0x15b0
2258#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 2259#define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 92a7d85917b4..b49258b16f4e 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -603,4 +603,10 @@ struct ib_cm_sidr_rep_param {
603int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id, 603int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
604 struct ib_cm_sidr_rep_param *param); 604 struct ib_cm_sidr_rep_param *param);
605 605
606/**
607 * ibcm_reject_msg - return a pointer to a reject message string.
608 * @reason: Value returned in the REJECT event status field.
609 */
610const char *__attribute_const__ ibcm_reject_msg(int reason);
611
606#endif /* IB_CM_H */ 612#endif /* IB_CM_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index c8a773ffe23b..981214b3790c 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -46,7 +46,7 @@
46#define IB_MGMT_BASE_VERSION 1 46#define IB_MGMT_BASE_VERSION 1
47#define OPA_MGMT_BASE_VERSION 0x80 47#define OPA_MGMT_BASE_VERSION 0x80
48 48
49#define OPA_SMP_CLASS_VERSION 0x80 49#define OPA_SM_CLASS_VERSION 0x80
50 50
51/* Management classes */ 51/* Management classes */
52#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 52#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 5ad43a487745..8029d2a51f14 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1102,6 +1102,7 @@ enum ib_qp_attr_mask {
1102 IB_QP_RESERVED2 = (1<<22), 1102 IB_QP_RESERVED2 = (1<<22),
1103 IB_QP_RESERVED3 = (1<<23), 1103 IB_QP_RESERVED3 = (1<<23),
1104 IB_QP_RESERVED4 = (1<<24), 1104 IB_QP_RESERVED4 = (1<<24),
1105 IB_QP_RATE_LIMIT = (1<<25),
1105}; 1106};
1106 1107
1107enum ib_qp_state { 1108enum ib_qp_state {
@@ -1151,6 +1152,7 @@ struct ib_qp_attr {
1151 u8 rnr_retry; 1152 u8 rnr_retry;
1152 u8 alt_port_num; 1153 u8 alt_port_num;
1153 u8 alt_timeout; 1154 u8 alt_timeout;
1155 u32 rate_limit;
1154}; 1156};
1155 1157
1156enum ib_wr_opcode { 1158enum ib_wr_opcode {
@@ -1592,17 +1594,19 @@ enum ib_flow_attr_type {
1592/* Supported steering header types */ 1594/* Supported steering header types */
1593enum ib_flow_spec_type { 1595enum ib_flow_spec_type {
1594 /* L2 headers*/ 1596 /* L2 headers*/
1595 IB_FLOW_SPEC_ETH = 0x20, 1597 IB_FLOW_SPEC_ETH = 0x20,
1596 IB_FLOW_SPEC_IB = 0x22, 1598 IB_FLOW_SPEC_IB = 0x22,
1597 /* L3 header*/ 1599 /* L3 header*/
1598 IB_FLOW_SPEC_IPV4 = 0x30, 1600 IB_FLOW_SPEC_IPV4 = 0x30,
1599 IB_FLOW_SPEC_IPV6 = 0x31, 1601 IB_FLOW_SPEC_IPV6 = 0x31,
1600 /* L4 headers*/ 1602 /* L4 headers*/
1601 IB_FLOW_SPEC_TCP = 0x40, 1603 IB_FLOW_SPEC_TCP = 0x40,
1602 IB_FLOW_SPEC_UDP = 0x41 1604 IB_FLOW_SPEC_UDP = 0x41,
1605 IB_FLOW_SPEC_VXLAN_TUNNEL = 0x50,
1606 IB_FLOW_SPEC_INNER = 0x100,
1603}; 1607};
1604#define IB_FLOW_SPEC_LAYER_MASK 0xF0 1608#define IB_FLOW_SPEC_LAYER_MASK 0xF0
1605#define IB_FLOW_SPEC_SUPPORT_LAYERS 4 1609#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
1606 1610
1607/* Flow steering rule priority is set according to it's domain. 1611/* Flow steering rule priority is set according to it's domain.
1608 * Lower domain value means higher priority. 1612 * Lower domain value means higher priority.
@@ -1630,7 +1634,7 @@ struct ib_flow_eth_filter {
1630}; 1634};
1631 1635
1632struct ib_flow_spec_eth { 1636struct ib_flow_spec_eth {
1633 enum ib_flow_spec_type type; 1637 u32 type;
1634 u16 size; 1638 u16 size;
1635 struct ib_flow_eth_filter val; 1639 struct ib_flow_eth_filter val;
1636 struct ib_flow_eth_filter mask; 1640 struct ib_flow_eth_filter mask;
@@ -1644,7 +1648,7 @@ struct ib_flow_ib_filter {
1644}; 1648};
1645 1649
1646struct ib_flow_spec_ib { 1650struct ib_flow_spec_ib {
1647 enum ib_flow_spec_type type; 1651 u32 type;
1648 u16 size; 1652 u16 size;
1649 struct ib_flow_ib_filter val; 1653 struct ib_flow_ib_filter val;
1650 struct ib_flow_ib_filter mask; 1654 struct ib_flow_ib_filter mask;
@@ -1669,7 +1673,7 @@ struct ib_flow_ipv4_filter {
1669}; 1673};
1670 1674
1671struct ib_flow_spec_ipv4 { 1675struct ib_flow_spec_ipv4 {
1672 enum ib_flow_spec_type type; 1676 u32 type;
1673 u16 size; 1677 u16 size;
1674 struct ib_flow_ipv4_filter val; 1678 struct ib_flow_ipv4_filter val;
1675 struct ib_flow_ipv4_filter mask; 1679 struct ib_flow_ipv4_filter mask;
@@ -1687,7 +1691,7 @@ struct ib_flow_ipv6_filter {
1687}; 1691};
1688 1692
1689struct ib_flow_spec_ipv6 { 1693struct ib_flow_spec_ipv6 {
1690 enum ib_flow_spec_type type; 1694 u32 type;
1691 u16 size; 1695 u16 size;
1692 struct ib_flow_ipv6_filter val; 1696 struct ib_flow_ipv6_filter val;
1693 struct ib_flow_ipv6_filter mask; 1697 struct ib_flow_ipv6_filter mask;
@@ -1701,15 +1705,30 @@ struct ib_flow_tcp_udp_filter {
1701}; 1705};
1702 1706
1703struct ib_flow_spec_tcp_udp { 1707struct ib_flow_spec_tcp_udp {
1704 enum ib_flow_spec_type type; 1708 u32 type;
1705 u16 size; 1709 u16 size;
1706 struct ib_flow_tcp_udp_filter val; 1710 struct ib_flow_tcp_udp_filter val;
1707 struct ib_flow_tcp_udp_filter mask; 1711 struct ib_flow_tcp_udp_filter mask;
1708}; 1712};
1709 1713
1714struct ib_flow_tunnel_filter {
1715 __be32 tunnel_id;
1716 u8 real_sz[0];
1717};
1718
1719/* ib_flow_spec_tunnel describes the Vxlan tunnel
1720 * the tunnel_id from val has the vni value
1721 */
1722struct ib_flow_spec_tunnel {
1723 u32 type;
1724 u16 size;
1725 struct ib_flow_tunnel_filter val;
1726 struct ib_flow_tunnel_filter mask;
1727};
1728
1710union ib_flow_spec { 1729union ib_flow_spec {
1711 struct { 1730 struct {
1712 enum ib_flow_spec_type type; 1731 u32 type;
1713 u16 size; 1732 u16 size;
1714 }; 1733 };
1715 struct ib_flow_spec_eth eth; 1734 struct ib_flow_spec_eth eth;
@@ -1717,6 +1736,7 @@ union ib_flow_spec {
1717 struct ib_flow_spec_ipv4 ipv4; 1736 struct ib_flow_spec_ipv4 ipv4;
1718 struct ib_flow_spec_tcp_udp tcp_udp; 1737 struct ib_flow_spec_tcp_udp tcp_udp;
1719 struct ib_flow_spec_ipv6 ipv6; 1738 struct ib_flow_spec_ipv6 ipv6;
1739 struct ib_flow_spec_tunnel tunnel;
1720}; 1740};
1721 1741
1722struct ib_flow_attr { 1742struct ib_flow_attr {
@@ -1933,7 +1953,8 @@ struct ib_device {
1933 struct ib_udata *udata); 1953 struct ib_udata *udata);
1934 int (*dealloc_pd)(struct ib_pd *pd); 1954 int (*dealloc_pd)(struct ib_pd *pd);
1935 struct ib_ah * (*create_ah)(struct ib_pd *pd, 1955 struct ib_ah * (*create_ah)(struct ib_pd *pd,
1936 struct ib_ah_attr *ah_attr); 1956 struct ib_ah_attr *ah_attr,
1957 struct ib_udata *udata);
1937 int (*modify_ah)(struct ib_ah *ah, 1958 int (*modify_ah)(struct ib_ah *ah,
1938 struct ib_ah_attr *ah_attr); 1959 struct ib_ah_attr *ah_attr);
1939 int (*query_ah)(struct ib_ah *ah, 1960 int (*query_ah)(struct ib_ah *ah,
@@ -2581,6 +2602,24 @@ void ib_dealloc_pd(struct ib_pd *pd);
2581struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr); 2602struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
2582 2603
2583/** 2604/**
2605 * ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
2606 * work completion.
2607 * @hdr: the L3 header to parse
2608 * @net_type: type of header to parse
2609 * @sgid: place to store source gid
2610 * @dgid: place to store destination gid
2611 */
2612int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
2613 enum rdma_network_type net_type,
2614 union ib_gid *sgid, union ib_gid *dgid);
2615
2616/**
2617 * ib_get_rdma_header_version - Get the header version
2618 * @hdr: the L3 header to parse
2619 */
2620int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
2621
2622/**
2584 * ib_init_ah_from_wc - Initializes address handle attributes from a 2623 * ib_init_ah_from_wc - Initializes address handle attributes from a
2585 * work completion. 2624 * work completion.
2586 * @device: Device on which the received message arrived. 2625 * @device: Device on which the received message arrived.
@@ -3357,4 +3396,7 @@ int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
3357void ib_drain_rq(struct ib_qp *qp); 3396void ib_drain_rq(struct ib_qp *qp);
3358void ib_drain_sq(struct ib_qp *qp); 3397void ib_drain_sq(struct ib_qp *qp);
3359void ib_drain_qp(struct ib_qp *qp); 3398void ib_drain_qp(struct ib_qp *qp);
3399
3400int ib_resolve_eth_dmac(struct ib_device *device,
3401 struct ib_ah_attr *ah_attr);
3360#endif /* IB_VERBS_H */ 3402#endif /* IB_VERBS_H */
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 6d0065c322b7..5cd7701db148 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -253,4 +253,10 @@ int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
253int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr, 253int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
254 int *qp_attr_mask); 254 int *qp_attr_mask);
255 255
256/**
257 * iwcm_reject_msg - return a pointer to a reject message string.
258 * @reason: Value returned in the REJECT event status field.
259 */
260const char *__attribute_const__ iwcm_reject_msg(int reason);
261
256#endif /* IW_CM_H */ 262#endif /* IW_CM_H */
diff --git a/include/rdma/opa_smi.h b/include/rdma/opa_smi.h
index 4a529ef47995..f7896117936e 100644
--- a/include/rdma/opa_smi.h
+++ b/include/rdma/opa_smi.h
@@ -44,8 +44,6 @@
44#define OPA_MAX_SLS 32 44#define OPA_MAX_SLS 32
45#define OPA_MAX_SCS 32 45#define OPA_MAX_SCS 32
46 46
47#define OPA_SMI_CLASS_VERSION 0x80
48
49#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF) 47#define OPA_LID_PERMISSIVE cpu_to_be32(0xFFFFFFFF)
50 48
51struct opa_smp { 49struct opa_smp {
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 81fb1d15e8bb..d3968b561f86 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -388,4 +388,29 @@ int rdma_set_afonly(struct rdma_cm_id *id, int afonly);
388 */ 388 */
389__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr); 389__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
390 390
391/**
392 * rdma_reject_msg - return a pointer to a reject message string.
393 * @id: Communication identifier that received the REJECT event.
394 * @reason: Value returned in the REJECT event status field.
395 */
396const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
397 int reason);
398/**
399 * rdma_is_consumer_reject - return true if the consumer rejected the connect
400 * request.
401 * @id: Communication identifier that received the REJECT event.
402 * @reason: Value returned in the REJECT event status field.
403 */
404bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
405
406/**
407 * rdma_consumer_reject_data - return the consumer reject private data and
408 * length, if any.
409 * @id: Communication identifier that received the REJECT event.
410 * @ev: RDMA CM reject event.
411 * @data_len: Pointer to the resulting length of the consumer data.
412 */
413const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
414 struct rdma_cm_event *ev, u8 *data_len);
415
391#endif /* RDMA_CM_H */ 416#endif /* RDMA_CM_H */
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index e31502107a58..861e23eaebda 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -185,6 +185,27 @@ struct rvt_driver_provided {
185 * check_support() for details. 185 * check_support() for details.
186 */ 186 */
187 187
188 /* hot path calldowns in a single cacheline */
189
190 /*
191 * Give the driver a notice that there is send work to do. It is up to
192 * the driver to generally push the packets out, this just queues the
193 * work with the driver. There are two variants here. The no_lock
194 * version requires the s_lock not to be held. The other assumes the
195 * s_lock is held.
196 */
197 void (*schedule_send)(struct rvt_qp *qp);
198 void (*schedule_send_no_lock)(struct rvt_qp *qp);
199
200 /* Driver specific work request checking */
201 int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
202
203 /*
204 * Sometimes rdmavt needs to kick the driver's send progress. That is
205 * done by this call back.
206 */
207 void (*do_send)(struct rvt_qp *qp);
208
188 /* Passed to ib core registration. Callback to create syfs files */ 209 /* Passed to ib core registration. Callback to create syfs files */
189 int (*port_callback)(struct ib_device *, u8, struct kobject *); 210 int (*port_callback)(struct ib_device *, u8, struct kobject *);
190 211
@@ -223,22 +244,6 @@ struct rvt_driver_provided {
223 void (*notify_qp_reset)(struct rvt_qp *qp); 244 void (*notify_qp_reset)(struct rvt_qp *qp);
224 245
225 /* 246 /*
226 * Give the driver a notice that there is send work to do. It is up to
227 * the driver to generally push the packets out, this just queues the
228 * work with the driver. There are two variants here. The no_lock
229 * version requires the s_lock not to be held. The other assumes the
230 * s_lock is held.
231 */
232 void (*schedule_send)(struct rvt_qp *qp);
233 void (*schedule_send_no_lock)(struct rvt_qp *qp);
234
235 /*
236 * Sometimes rdmavt needs to kick the driver's send progress. That is
237 * done by this call back.
238 */
239 void (*do_send)(struct rvt_qp *qp);
240
241 /*
242 * Get a path mtu from the driver based on qp attributes. 247 * Get a path mtu from the driver based on qp attributes.
243 */ 248 */
244 int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp, 249 int (*get_pmtu_from_attr)(struct rvt_dev_info *rdi, struct rvt_qp *qp,
@@ -324,9 +329,6 @@ struct rvt_driver_provided {
324 void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr, 329 void (*modify_qp)(struct rvt_qp *qp, struct ib_qp_attr *attr,
325 int attr_mask, struct ib_udata *udata); 330 int attr_mask, struct ib_udata *udata);
326 331
327 /* Driver specific work request checking */
328 int (*check_send_wqe)(struct rvt_qp *qp, struct rvt_swqe *wqe);
329
330 /* Notify driver a mad agent has been created */ 332 /* Notify driver a mad agent has been created */
331 void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx); 333 void (*notify_create_mad_agent)(struct rvt_dev_info *rdi, int port_idx);
332 334
@@ -355,12 +357,12 @@ struct rvt_dev_info {
355 /* post send table */ 357 /* post send table */
356 const struct rvt_operation_params *post_parms; 358 const struct rvt_operation_params *post_parms;
357 359
358 struct rvt_mregion __rcu *dma_mr;
359 struct rvt_lkey_table lkey_table;
360
361 /* Driver specific helper functions */ 360 /* Driver specific helper functions */
362 struct rvt_driver_provided driver_f; 361 struct rvt_driver_provided driver_f;
363 362
363 struct rvt_mregion __rcu *dma_mr;
364 struct rvt_lkey_table lkey_table;
365
364 /* Internal use */ 366 /* Internal use */
365 int n_pds_allocated; 367 int n_pds_allocated;
366 spinlock_t n_pds_lock; /* Protect pd allocated count */ 368 spinlock_t n_pds_lock; /* Protect pd allocated count */
diff --git a/include/rdma/rdmavt_mr.h b/include/rdma/rdmavt_mr.h
index 6b3c6c8b6b77..de59de28b6a2 100644
--- a/include/rdma/rdmavt_mr.h
+++ b/include/rdma/rdmavt_mr.h
@@ -90,11 +90,15 @@ struct rvt_mregion {
90#define RVT_MAX_LKEY_TABLE_BITS 23 90#define RVT_MAX_LKEY_TABLE_BITS 23
91 91
92struct rvt_lkey_table { 92struct rvt_lkey_table {
93 spinlock_t lock; /* protect changes in this struct */ 93 /* read mostly fields */
94 u32 next; /* next unused index (speeds search) */
95 u32 gen; /* generation count */
96 u32 max; /* size of the table */ 94 u32 max; /* size of the table */
95 u32 shift; /* lkey/rkey shift */
97 struct rvt_mregion __rcu **table; 96 struct rvt_mregion __rcu **table;
97 /* writeable fields */
98 /* protect changes in this struct */
99 spinlock_t lock ____cacheline_aligned_in_smp;
100 u32 next; /* next unused index (speeds search) */
101 u32 gen; /* generation count */
98}; 102};
99 103
100/* 104/*
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 2c5183ef0243..f3dbd157ae5c 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -51,6 +51,7 @@
51#include <rdma/rdma_vt.h> 51#include <rdma/rdma_vt.h>
52#include <rdma/ib_pack.h> 52#include <rdma/ib_pack.h>
53#include <rdma/ib_verbs.h> 53#include <rdma/ib_verbs.h>
54#include <rdma/rdmavt_cq.h>
54/* 55/*
55 * Atomic bit definitions for r_aflags. 56 * Atomic bit definitions for r_aflags.
56 */ 57 */
@@ -485,6 +486,23 @@ static inline void rvt_put_qp(struct rvt_qp *qp)
485} 486}
486 487
487/** 488/**
489 * rvt_put_swqe - drop mr refs held by swqe
490 * @wqe - the send wqe
491 *
492 * This drops any mr references held by the swqe
493 */
494static inline void rvt_put_swqe(struct rvt_swqe *wqe)
495{
496 int i;
497
498 for (i = 0; i < wqe->wr.num_sge; i++) {
499 struct rvt_sge *sge = &wqe->sg_list[i];
500
501 rvt_put_mr(sge->mr);
502 }
503}
504
505/**
488 * rvt_qp_wqe_reserve - reserve operation 506 * rvt_qp_wqe_reserve - reserve operation
489 * @qp - the rvt qp 507 * @qp - the rvt qp
490 * @wqe - the send wqe 508 * @wqe - the send wqe
@@ -527,6 +545,65 @@ static inline void rvt_qp_wqe_unreserve(
527 } 545 }
528} 546}
529 547
548extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
549
550/**
551 * rvt_qp_swqe_complete() - insert send completion
552 * @qp - the qp
553 * @wqe - the send wqe
554 * @status - completion status
555 *
556 * Insert a send completion into the completion
557 * queue if the qp indicates it should be done.
558 *
559 * See IBTA 10.7.3.1 for info on completion
560 * control.
561 */
562static inline void rvt_qp_swqe_complete(
563 struct rvt_qp *qp,
564 struct rvt_swqe *wqe,
565 enum ib_wc_status status)
566{
567 if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
568 return;
569 if (!(qp->s_flags & RVT_S_SIGNAL_REQ_WR) ||
570 (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
571 status != IB_WC_SUCCESS) {
572 struct ib_wc wc;
573
574 memset(&wc, 0, sizeof(wc));
575 wc.wr_id = wqe->wr.wr_id;
576 wc.status = status;
577 wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
578 wc.qp = &qp->ibqp;
579 wc.byte_len = wqe->length;
580 rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
581 status != IB_WC_SUCCESS);
582 }
583}
584
585/**
586 * @qp - the qp pair
587 * @len - the length
588 *
589 * Perform a shift based mtu round up divide
590 */
591static inline u32 rvt_div_round_up_mtu(struct rvt_qp *qp, u32 len)
592{
593 return (len + qp->pmtu - 1) >> qp->log_pmtu;
594}
595
596/**
597 * @qp - the qp pair
598 * @len - the length
599 *
600 * Perform a shift based mtu divide
601 */
602static inline u32 rvt_div_mtu(struct rvt_qp *qp, u32 len)
603{
604 return len >> qp->log_pmtu;
605}
606
530extern const int ib_rvt_state_ops[]; 607extern const int ib_rvt_state_ops[];
531 608
532struct rvt_dev_info; 609struct rvt_dev_info;
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index f14ab7ff5fee..82bdf5626859 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -14,3 +14,5 @@ header-y += mlx5-abi.h
14header-y += mthca-abi.h 14header-y += mthca-abi.h
15header-y += nes-abi.h 15header-y += nes-abi.h
16header-y += ocrdma-abi.h 16header-y += ocrdma-abi.h
17header-y += hns-abi.h
18header-y += vmw_pvrdma-abi.h
diff --git a/include/uapi/rdma/hfi/hfi1_user.h b/include/uapi/rdma/hfi/hfi1_user.h
index d15e7289d835..587b7360e820 100644
--- a/include/uapi/rdma/hfi/hfi1_user.h
+++ b/include/uapi/rdma/hfi/hfi1_user.h
@@ -75,7 +75,7 @@
75 * may not be implemented; the user code must deal with this if it 75 * may not be implemented; the user code must deal with this if it
76 * cares, or it must abort after initialization reports the difference. 76 * cares, or it must abort after initialization reports the difference.
77 */ 77 */
78#define HFI1_USER_SWMINOR 2 78#define HFI1_USER_SWMINOR 3
79 79
80/* 80/*
81 * We will encode the major/minor inside a single 32bit version number. 81 * We will encode the major/minor inside a single 32bit version number.
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
new file mode 100644
index 000000000000..5d7401963e35
--- /dev/null
+++ b/include/uapi/rdma/hns-abi.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (c) 2016 Hisilicon Limited.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33#ifndef HNS_ABI_USER_H
34#define HNS_ABI_USER_H
35
36#include <linux/types.h>
37
38struct hns_roce_ib_create_cq {
39 __u64 buf_addr;
40};
41
42struct hns_roce_ib_create_qp {
43 __u64 buf_addr;
44 __u64 db_addr;
45 __u8 log_sq_bb_count;
46 __u8 log_sq_stride;
47 __u8 sq_no_prefetch;
48 __u8 reserved[5];
49};
50
51struct hns_roce_ib_alloc_ucontext_resp {
52 __u32 qp_tab_size;
53};
54#endif /* HNS_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 25225ebbc7d5..dfdfe4e92d31 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -37,6 +37,7 @@
37#define IB_USER_VERBS_H 37#define IB_USER_VERBS_H
38 38
39#include <linux/types.h> 39#include <linux/types.h>
40#include <rdma/ib_verbs.h>
40 41
41/* 42/*
42 * Increment this value if any changes that break userspace ABI 43 * Increment this value if any changes that break userspace ABI
@@ -93,6 +94,7 @@ enum {
93 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, 94 IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
94 IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ, 95 IB_USER_VERBS_EX_CMD_CREATE_CQ = IB_USER_VERBS_CMD_CREATE_CQ,
95 IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP, 96 IB_USER_VERBS_EX_CMD_CREATE_QP = IB_USER_VERBS_CMD_CREATE_QP,
97 IB_USER_VERBS_EX_CMD_MODIFY_QP = IB_USER_VERBS_CMD_MODIFY_QP,
96 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, 98 IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
97 IB_USER_VERBS_EX_CMD_DESTROY_FLOW, 99 IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
98 IB_USER_VERBS_EX_CMD_CREATE_WQ, 100 IB_USER_VERBS_EX_CMD_CREATE_WQ,
@@ -545,6 +547,14 @@ enum {
545 IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE, 547 IB_UVERBS_CREATE_QP_SUP_COMP_MASK = IB_UVERBS_CREATE_QP_MASK_IND_TABLE,
546}; 548};
547 549
550enum {
551 IB_USER_LEGACY_LAST_QP_ATTR_MASK = IB_QP_DEST_QPN
552};
553
554enum {
555 IB_USER_LAST_QP_ATTR_MASK = IB_QP_RATE_LIMIT
556};
557
548struct ib_uverbs_ex_create_qp { 558struct ib_uverbs_ex_create_qp {
549 __u64 user_handle; 559 __u64 user_handle;
550 __u32 pd_handle; 560 __u32 pd_handle;
@@ -684,9 +694,20 @@ struct ib_uverbs_modify_qp {
684 __u64 driver_data[0]; 694 __u64 driver_data[0];
685}; 695};
686 696
697struct ib_uverbs_ex_modify_qp {
698 struct ib_uverbs_modify_qp base;
699 __u32 rate_limit;
700 __u32 reserved;
701};
702
687struct ib_uverbs_modify_qp_resp { 703struct ib_uverbs_modify_qp_resp {
688}; 704};
689 705
706struct ib_uverbs_ex_modify_qp_resp {
707 __u32 comp_mask;
708 __u32 response_length;
709};
710
690struct ib_uverbs_destroy_qp { 711struct ib_uverbs_destroy_qp {
691 __u64 response; 712 __u64 response;
692 __u32 qp_handle; 713 __u32 qp_handle;
@@ -908,6 +929,23 @@ struct ib_uverbs_flow_spec_ipv6 {
908 struct ib_uverbs_flow_ipv6_filter mask; 929 struct ib_uverbs_flow_ipv6_filter mask;
909}; 930};
910 931
932struct ib_uverbs_flow_tunnel_filter {
933 __be32 tunnel_id;
934};
935
936struct ib_uverbs_flow_spec_tunnel {
937 union {
938 struct ib_uverbs_flow_spec_hdr hdr;
939 struct {
940 __u32 type;
941 __u16 size;
942 __u16 reserved;
943 };
944 };
945 struct ib_uverbs_flow_tunnel_filter val;
946 struct ib_uverbs_flow_tunnel_filter mask;
947};
948
911struct ib_uverbs_flow_attr { 949struct ib_uverbs_flow_attr {
912 __u32 type; 950 __u32 type;
913 __u16 size; 951 __u16 size;
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index f5d0f4e83b59..fae6cdaeb56d 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -82,6 +82,7 @@ enum mlx5_ib_alloc_ucontext_resp_mask {
82 82
83enum mlx5_user_cmds_supp_uhw { 83enum mlx5_user_cmds_supp_uhw {
84 MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0, 84 MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
85 MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
85}; 86};
86 87
87struct mlx5_ib_alloc_ucontext_resp { 88struct mlx5_ib_alloc_ucontext_resp {
@@ -124,18 +125,47 @@ struct mlx5_ib_rss_caps {
124 __u8 reserved[7]; 125 __u8 reserved[7];
125}; 126};
126 127
128enum mlx5_ib_cqe_comp_res_format {
129 MLX5_IB_CQE_RES_FORMAT_HASH = 1 << 0,
130 MLX5_IB_CQE_RES_FORMAT_CSUM = 1 << 1,
131 MLX5_IB_CQE_RES_RESERVED = 1 << 2,
132};
133
134struct mlx5_ib_cqe_comp_caps {
135 __u32 max_num;
136 __u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
137};
138
139struct mlx5_packet_pacing_caps {
140 __u32 qp_rate_limit_min;
141 __u32 qp_rate_limit_max; /* In kpbs */
142
143 /* Corresponding bit will be set if qp type from
144 * 'enum ib_qp_type' is supported, e.g.
145 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
146 */
147 __u32 supported_qpts;
148 __u32 reserved;
149};
150
127struct mlx5_ib_query_device_resp { 151struct mlx5_ib_query_device_resp {
128 __u32 comp_mask; 152 __u32 comp_mask;
129 __u32 response_length; 153 __u32 response_length;
130 struct mlx5_ib_tso_caps tso_caps; 154 struct mlx5_ib_tso_caps tso_caps;
131 struct mlx5_ib_rss_caps rss_caps; 155 struct mlx5_ib_rss_caps rss_caps;
156 struct mlx5_ib_cqe_comp_caps cqe_comp_caps;
157 struct mlx5_packet_pacing_caps packet_pacing_caps;
158 __u32 mlx5_ib_support_multi_pkt_send_wqes;
159 __u32 reserved;
132}; 160};
133 161
134struct mlx5_ib_create_cq { 162struct mlx5_ib_create_cq {
135 __u64 buf_addr; 163 __u64 buf_addr;
136 __u64 db_addr; 164 __u64 db_addr;
137 __u32 cqe_size; 165 __u32 cqe_size;
138 __u32 reserved; /* explicit padding (optional on i386) */ 166 __u8 cqe_comp_en;
167 __u8 cqe_comp_res_format;
168 __u16 reserved; /* explicit padding (optional on i386) */
139}; 169};
140 170
141struct mlx5_ib_create_cq_resp { 171struct mlx5_ib_create_cq_resp {
@@ -232,6 +262,12 @@ struct mlx5_ib_create_wq {
232 __u32 reserved; 262 __u32 reserved;
233}; 263};
234 264
265struct mlx5_ib_create_ah_resp {
266 __u32 response_length;
267 __u8 dmac[ETH_ALEN];
268 __u8 reserved[6];
269};
270
235struct mlx5_ib_create_wq_resp { 271struct mlx5_ib_create_wq_resp {
236 __u32 response_length; 272 __u32 response_length;
237 __u32 reserved; 273 __u32 reserved;
diff --git a/include/uapi/rdma/rdma_user_cm.h b/include/uapi/rdma/rdma_user_cm.h
index 01923d463673..d71da36e3cd6 100644
--- a/include/uapi/rdma/rdma_user_cm.h
+++ b/include/uapi/rdma/rdma_user_cm.h
@@ -110,7 +110,7 @@ struct rdma_ucm_bind {
110 __u32 id; 110 __u32 id;
111 __u16 addr_size; 111 __u16 addr_size;
112 __u16 reserved; 112 __u16 reserved;
113 struct sockaddr_storage addr; 113 struct __kernel_sockaddr_storage addr;
114}; 114};
115 115
116struct rdma_ucm_resolve_ip { 116struct rdma_ucm_resolve_ip {
@@ -126,8 +126,8 @@ struct rdma_ucm_resolve_addr {
126 __u16 src_size; 126 __u16 src_size;
127 __u16 dst_size; 127 __u16 dst_size;
128 __u32 reserved; 128 __u32 reserved;
129 struct sockaddr_storage src_addr; 129 struct __kernel_sockaddr_storage src_addr;
130 struct sockaddr_storage dst_addr; 130 struct __kernel_sockaddr_storage dst_addr;
131}; 131};
132 132
133struct rdma_ucm_resolve_route { 133struct rdma_ucm_resolve_route {
@@ -164,8 +164,8 @@ struct rdma_ucm_query_addr_resp {
164 __u16 pkey; 164 __u16 pkey;
165 __u16 src_size; 165 __u16 src_size;
166 __u16 dst_size; 166 __u16 dst_size;
167 struct sockaddr_storage src_addr; 167 struct __kernel_sockaddr_storage src_addr;
168 struct sockaddr_storage dst_addr; 168 struct __kernel_sockaddr_storage dst_addr;
169}; 169};
170 170
171struct rdma_ucm_query_path_resp { 171struct rdma_ucm_query_path_resp {
@@ -257,7 +257,7 @@ struct rdma_ucm_join_mcast {
257 __u32 id; 257 __u32 id;
258 __u16 addr_size; 258 __u16 addr_size;
259 __u16 join_flags; 259 __u16 join_flags;
260 struct sockaddr_storage addr; 260 struct __kernel_sockaddr_storage addr;
261}; 261};
262 262
263struct rdma_ucm_get_event { 263struct rdma_ucm_get_event {
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
new file mode 100644
index 000000000000..5016abc9ee97
--- /dev/null
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -0,0 +1,289 @@
/*
 * Copyright (c) 2012-2016 VMware, Inc.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of EITHER the GNU General Public License
 * version 2 as published by the Free Software Foundation or the BSD
 * 2-Clause License. This program is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; WITHOUT EVEN THE IMPLIED
 * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License version 2 for more details at
 * http://www.gnu.org/licenses/old-licenses/gpl-2.0.en.html.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program available in the file COPYING in the main
 * directory of this source tree.
 *
 * The BSD 2-Clause License
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

46#ifndef __VMW_PVRDMA_ABI_H__
47#define __VMW_PVRDMA_ABI_H__
48
49#include <linux/types.h>
50
/*
 * UAR (user access region) doorbell layout.
 *
 * NOTE: this is a UAPI header exported to userspace, so the kernel-only
 * BIT() macro must not be used here — spell the masks out as explicit
 * shifts instead.  Unsigned literals are used so that shifting into
 * bit 31 is well-defined.
 */
#define PVRDMA_UVERBS_ABI_VERSION	3		/* ABI Version. */
#define PVRDMA_UAR_HANDLE_MASK		0x00FFFFFF	/* Bottom 24 bits. */
#define PVRDMA_UAR_QP_OFFSET		0		/* QP doorbell. */
#define PVRDMA_UAR_QP_SEND		(1U << 30)	/* Send bit. */
#define PVRDMA_UAR_QP_RECV		(1U << 31)	/* Recv bit. */
#define PVRDMA_UAR_CQ_OFFSET		4		/* CQ doorbell. */
#define PVRDMA_UAR_CQ_ARM_SOL		(1U << 29)	/* Arm solicited bit. */
#define PVRDMA_UAR_CQ_ARM		(1U << 30)	/* Arm bit. */
#define PVRDMA_UAR_CQ_POLL		(1U << 31)	/* Poll bit. */
60
/* Send work-request opcodes; values mirror the device ABI and must not
 * be reordered.
 */
enum pvrdma_wr_opcode {
	PVRDMA_WR_RDMA_WRITE,
	PVRDMA_WR_RDMA_WRITE_WITH_IMM,
	PVRDMA_WR_SEND,
	PVRDMA_WR_SEND_WITH_IMM,
	PVRDMA_WR_RDMA_READ,
	PVRDMA_WR_ATOMIC_CMP_AND_SWP,
	PVRDMA_WR_ATOMIC_FETCH_AND_ADD,
	PVRDMA_WR_LSO,
	PVRDMA_WR_SEND_WITH_INV,
	PVRDMA_WR_RDMA_READ_WITH_INV,
	PVRDMA_WR_LOCAL_INV,
	PVRDMA_WR_FAST_REG_MR,
	PVRDMA_WR_MASKED_ATOMIC_CMP_AND_SWP,
	PVRDMA_WR_MASKED_ATOMIC_FETCH_AND_ADD,
	PVRDMA_WR_BIND_MW,
	PVRDMA_WR_REG_SIG_MR,
};
79
/* Work-completion status codes; values mirror the device ABI and must
 * not be reordered.
 */
enum pvrdma_wc_status {
	PVRDMA_WC_SUCCESS,
	PVRDMA_WC_LOC_LEN_ERR,
	PVRDMA_WC_LOC_QP_OP_ERR,
	PVRDMA_WC_LOC_EEC_OP_ERR,
	PVRDMA_WC_LOC_PROT_ERR,
	PVRDMA_WC_WR_FLUSH_ERR,
	PVRDMA_WC_MW_BIND_ERR,
	PVRDMA_WC_BAD_RESP_ERR,
	PVRDMA_WC_LOC_ACCESS_ERR,
	PVRDMA_WC_REM_INV_REQ_ERR,
	PVRDMA_WC_REM_ACCESS_ERR,
	PVRDMA_WC_REM_OP_ERR,
	PVRDMA_WC_RETRY_EXC_ERR,
	PVRDMA_WC_RNR_RETRY_EXC_ERR,
	PVRDMA_WC_LOC_RDD_VIOL_ERR,
	PVRDMA_WC_REM_INV_RD_REQ_ERR,
	PVRDMA_WC_REM_ABORT_ERR,
	PVRDMA_WC_INV_EECN_ERR,
	PVRDMA_WC_INV_EEC_STATE_ERR,
	PVRDMA_WC_FATAL_ERR,
	PVRDMA_WC_RESP_TIMEOUT_ERR,
	PVRDMA_WC_GENERAL_ERR,
};
104
/* Work-completion opcodes.  Receive completions start at bit 7 so they
 * can be distinguished from send completions.
 */
enum pvrdma_wc_opcode {
	PVRDMA_WC_SEND,
	PVRDMA_WC_RDMA_WRITE,
	PVRDMA_WC_RDMA_READ,
	PVRDMA_WC_COMP_SWAP,
	PVRDMA_WC_FETCH_ADD,
	PVRDMA_WC_BIND_MW,
	PVRDMA_WC_LSO,
	PVRDMA_WC_LOCAL_INV,
	PVRDMA_WC_FAST_REG_MR,
	PVRDMA_WC_MASKED_COMP_SWAP,
	PVRDMA_WC_MASKED_FETCH_ADD,
	PVRDMA_WC_RECV = 1 << 7,
	PVRDMA_WC_RECV_RDMA_WITH_IMM,
};
120
/* Work-completion flag bits (OR-able). */
enum pvrdma_wc_flags {
	PVRDMA_WC_GRH			= 1 << 0,
	PVRDMA_WC_WITH_IMM		= 1 << 1,
	PVRDMA_WC_WITH_INVALIDATE	= 1 << 2,
	PVRDMA_WC_IP_CSUM_OK		= 1 << 3,
	PVRDMA_WC_WITH_SMAC		= 1 << 4,
	PVRDMA_WC_WITH_VLAN		= 1 << 5,
	PVRDMA_WC_FLAGS_MAX		= PVRDMA_WC_WITH_VLAN,
};
130
/* Response to allocating a user context. */
struct pvrdma_alloc_ucontext_resp {
	__u32 qp_tab_size;
	__u32 reserved;
};

/* Response to allocating a protection domain. */
struct pvrdma_alloc_pd_resp {
	__u32 pdn;
	__u32 reserved;
};

/* Userspace request to create a completion queue. */
struct pvrdma_create_cq {
	__u64 buf_addr;
	__u32 buf_size;
	__u32 reserved;
};

/* Response to creating a completion queue. */
struct pvrdma_create_cq_resp {
	__u32 cqn;
	__u32 reserved;
};

/* Userspace request to resize a completion queue. */
struct pvrdma_resize_cq {
	__u64 buf_addr;
	__u32 buf_size;
	__u32 reserved;
};

/* Userspace request to create a shared receive queue. */
struct pvrdma_create_srq {
	__u64 buf_addr;
};

/* Response to creating a shared receive queue. */
struct pvrdma_create_srq_resp {
	__u32 srqn;
	__u32 reserved;
};

/* Userspace request to create a queue pair. */
struct pvrdma_create_qp {
	__u64 rbuf_addr;
	__u64 sbuf_addr;
	__u32 rbuf_size;
	__u32 sbuf_size;
	__u64 qp_addr;
};
174
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
	__u64 swap_val;
	__u64 compare_val;
	__u64 swap_mask;
	__u64 compare_mask;
};

/* PVRDMA masked atomic fetch and add */
struct pvrdma_ex_fetch_add {
	__u64 add_val;
	__u64 field_boundary;
};

/* PVRDMA address vector. */
struct pvrdma_av {
	__u32 port_pd;
	__u32 sl_tclass_flowlabel;
	__u8 dgid[16];
	__u8 src_path_bits;
	__u8 gid_index;
	__u8 stat_rate;
	__u8 hop_limit;
	__u8 dmac[6];
	__u8 reserved[6];	/* pad to 8-byte multiple */
};

/* PVRDMA scatter/gather entry */
struct pvrdma_sge {
	__u64 addr;
	__u32 length;
	__u32 lkey;
};
208
/* PVRDMA receive queue work request */
struct pvrdma_rq_wqe_hdr {
	__u64 wr_id;		/* wr id */
	__u32 num_sge;		/* size of s/g array */
	__u32 total_len;	/* reserved */
};
/* Use pvrdma_sge (ib_sge) for receive queue s/g array elements. */
216
217/* PVRDMA send queue work request */
218struct pvrdma_sq_wqe_hdr {
219 __u64 wr_id; /* wr id */
220 __u32 num_sge; /* size of s/g array */
221 __u32 total_len; /* reserved */
222 __u32 opcode; /* operation type */
223 __u32 send_flags; /* wr flags */
224 union {
225 __u32 imm_data;
226 __u32 invalidate_rkey;
227 } ex;
228 __u32 reserved;
229 union {
230 struct {
231 __u64 remote_addr;
232 __u32 rkey;
233 __u8 reserved[4];
234 } rdma;
235 struct {
236 __u64 remote_addr;
237 __u64 compare_add;
238 __u64 swap;
239 __u32 rkey;
240 __u32 reserved;
241 } atomic;
242 struct {
243 __u64 remote_addr;
244 __u32 log_arg_sz;
245 __u32 rkey;
246 union {
247 struct pvrdma_ex_cmp_swap cmp_swap;
248 struct pvrdma_ex_fetch_add fetch_add;
249 } wr_data;
250 } masked_atomics;
251 struct {
252 __u64 iova_start;
253 __u64 pl_pdir_dma;
254 __u32 page_shift;
255 __u32 page_list_len;
256 __u32 length;
257 __u32 access_flags;
258 __u32 rkey;
259 } fast_reg;
260 struct {
261 __u32 remote_qpn;
262 __u32 remote_qkey;
263 struct pvrdma_av av;
264 } ud;
265 } wr;
266};
267/* Use pvrdma_sge (ib_sge) for send queue s/g array elements. */
268
/* Completion queue element. */
struct pvrdma_cqe {
	__u64 wr_id;
	__u64 qp;
	__u32 opcode;
	__u32 status;
	__u32 byte_len;
	__u32 imm_data;
	__u32 src_qp;
	__u32 wc_flags;
	__u32 vendor_err;
	__u16 pkey_index;
	__u16 slid;
	__u8 sl;
	__u8 dlid_path_bits;
	__u8 port_num;
	__u8 smac[6];
	__u8 reserved2[7]; /* Pad to next power of 2 (64). */
};
288
289#endif /* __VMW_PVRDMA_ABI_H__ */