Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_marshall.h  |   5
-rw-r--r--  include/rdma/ib_user_mad.h  |   2
-rw-r--r--  include/rdma/ib_verbs.h     | 271
-rw-r--r--  include/rdma/rdma_cm.h      |  62
-rw-r--r--  include/rdma/rdma_cm_ib.h   |   3
-rw-r--r--  include/rdma/rdma_user_cm.h | 206
6 files changed, 530 insertions(+), 19 deletions(-)
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0dfb..db037205c9e8 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 			     struct ib_qp_attr *src);
 
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+			     struct ib_ah_attr *src);
+
 void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
 			      struct ib_sa_path_rec *src);
 
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
index 44537aa32e62..d66b15ea82c4 100644
--- a/include/rdma/ib_user_mad.h
+++ b/include/rdma/ib_user_mad.h
@@ -98,7 +98,7 @@ struct ib_user_mad_hdr {
  */
 struct ib_user_mad {
 	struct ib_user_mad_hdr hdr;
-	__u8	data[0];
+	__u64	data[0];
 };
 
 /**
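The __u8 to __u64 change above only affects alignment: data[] stays a zero-length (flexible) array that follows the fixed header, but the payload is now guaranteed to start on an 8-byte boundary. A minimal userspace sketch of how such a structure is consumed, assuming an already-open umad file descriptor; read_umad and mad_len are illustrative, not part of the header:

/* Hedged example: allocate header + payload in one buffer and read a
 * MAD into it; the flexible array member data[] occupies the tail. */
#include <stdlib.h>
#include <unistd.h>
#include <rdma/ib_user_mad.h>	/* assumed include path */

static struct ib_user_mad *read_umad(int fd, size_t mad_len)
{
	struct ib_user_mad *mad = malloc(sizeof(*mad) + mad_len);

	if (!mad)
		return NULL;
	if (read(fd, mad, sizeof(*mad) + mad_len) < 0) {	/* payload lands in mad->data */
		free(mad);
		return NULL;
	}
	return mad;
}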
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..765589f4d166 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,9 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <linux/kref.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -417,8 +420,8 @@ struct ib_wc {
 	enum ib_wc_opcode	opcode;
 	u32			vendor_err;
 	u32			byte_len;
+	struct ib_qp	       *qp;
 	__be32			imm_data;
-	u32			qp_num;
 	u32			src_qp;
 	int			wc_flags;
 	u16			pkey_index;
@@ -848,6 +851,49 @@ struct ib_cache {
 	u8                     *lmc_cache;
 };
 
+struct ib_dma_mapping_ops {
+	int		(*mapping_error)(struct ib_device *dev,
+					 u64 dma_addr);
+	u64		(*map_single)(struct ib_device *dev,
+				      void *ptr, size_t size,
+				      enum dma_data_direction direction);
+	void		(*unmap_single)(struct ib_device *dev,
+					u64 addr, size_t size,
+					enum dma_data_direction direction);
+	u64		(*map_page)(struct ib_device *dev,
+				    struct page *page, unsigned long offset,
+				    size_t size,
+				    enum dma_data_direction direction);
+	void		(*unmap_page)(struct ib_device *dev,
+				      u64 addr, size_t size,
+				      enum dma_data_direction direction);
+	int		(*map_sg)(struct ib_device *dev,
+				  struct scatterlist *sg, int nents,
+				  enum dma_data_direction direction);
+	void		(*unmap_sg)(struct ib_device *dev,
+				    struct scatterlist *sg, int nents,
+				    enum dma_data_direction direction);
+	u64		(*dma_address)(struct ib_device *dev,
+				       struct scatterlist *sg);
+	unsigned int	(*dma_len)(struct ib_device *dev,
+				   struct scatterlist *sg);
+	void		(*sync_single_for_cpu)(struct ib_device *dev,
+					       u64 dma_handle,
+					       size_t size,
+					       enum dma_data_direction dir);
+	void		(*sync_single_for_device)(struct ib_device *dev,
+						  u64 dma_handle,
+						  size_t size,
+						  enum dma_data_direction dir);
+	void		*(*alloc_coherent)(struct ib_device *dev,
+					   size_t size,
+					   u64 *dma_handle,
+					   gfp_t flag);
+	void		(*free_coherent)(struct ib_device *dev,
+					 size_t size, void *cpu_addr,
+					 u64 dma_handle);
+};
+
 struct iw_cm_verbs;
 
 struct ib_device {
@@ -992,6 +1038,8 @@ struct ib_device {
 						   struct ib_mad *in_mad,
 						   struct ib_mad *out_mad);
 
+	struct ib_dma_mapping_ops   *dma_ops;
+
 	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
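The two hunks above add the dispatch point: a device that cannot use the generic DMA API installs an ib_dma_mapping_ops table in its ib_device before registration, while devices that leave dma_ops NULL fall through to the regular dma_* calls inside the ib_dma_* wrappers added below. A hedged sketch of the provider side; all driver names here (struct my_hca, the my_* handlers) are purely illustrative:

/* Illustrative driver that hands out kernel virtual addresses as "DMA"
 * addresses; only three handlers are shown, but a real table must set
 * every member consulted by the ib_dma_* wrappers. */
static int my_mapping_error(struct ib_device *dev, u64 dma_addr)
{
	return dma_addr == 0;
}

static u64 my_map_single(struct ib_device *dev, void *ptr, size_t size,
			 enum dma_data_direction direction)
{
	return (u64) (unsigned long) ptr;	/* virtual address used directly */
}

static void my_unmap_single(struct ib_device *dev, u64 addr, size_t size,
			    enum dma_data_direction direction)
{
	/* nothing to undo for a virtual-address mapping */
}

static struct ib_dma_mapping_ops my_dma_ops = {
	.mapping_error	= my_mapping_error,
	.map_single	= my_map_single,
	.unmap_single	= my_unmap_single,
	/* ...remaining handlers must be filled in the same way... */
};

struct my_hca {				/* hypothetical driver structure */
	struct ib_device ibdev;
};

static void my_set_dma_ops(struct my_hca *hca)
{
	hca->ibdev.dma_ops = &my_dma_ops;	/* checked by every ib_dma_* wrapper */
}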
@@ -1395,10 +1443,231 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  * usable for DMA.
  * @pd: The protection domain associated with the memory region.
  * @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
  */
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
 
 /**
+ * ib_dma_mapping_error - check a DMA addr for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->mapping_error(dev, dma_addr);
+	return dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_single(struct ib_device *dev,
+				    void *cpu_addr, size_t size,
+				    enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
+	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+				       u64 addr, size_t size,
+				       enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_single(dev, addr, size, direction);
+	else
+		dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_page(struct ib_device *dev,
+				  struct page *page,
+				  unsigned long offset,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_page(dev, page, offset, size, direction);
+	return dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+				     u64 addr, size_t size,
+				     enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_page(dev, addr, size, direction);
+	else
+		dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+				struct scatterlist *sg, int nents,
+				enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->map_sg(dev, sg, nents, direction);
+	return dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction direction)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction);
+	else
+		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline u64 ib_sg_dma_address(struct ib_device *dev,
+				    struct scatterlist *sg)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_address(dev, sg);
+	return sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+					 struct scatterlist *sg)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->dma_len(dev, sg);
+	return sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+					      u64 addr,
+					      size_t size,
+					      enum dma_data_direction dir)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir);
+	else
+		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+						 u64 addr,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir);
+	else
+		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA
+ * @dev: The device for which the DMA address is requested
+ * @size: The size of the region to allocate in bytes
+ * @dma_handle: A pointer for returning the DMA address of the region
+ * @flag: memory allocator flags
+ */
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+					  size_t size,
+					  u64 *dma_handle,
+					  gfp_t flag)
+{
+	if (dev->dma_ops)
+		return dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag);
+	else {
+		dma_addr_t handle;
+		void *ret;
+
+		ret = dma_alloc_coherent(dev->dma_device, size, &handle, flag);
+		*dma_handle = handle;
+		return ret;
+	}
+}
+
+/**
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
+ * @dev: The device for which the DMA addresses were allocated
+ * @size: The size of the region
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
+ */
+static inline void ib_dma_free_coherent(struct ib_device *dev,
+					size_t size, void *cpu_addr,
+					u64 dma_handle)
+{
+	if (dev->dma_ops)
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
+	else
+		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+}
+
+/**
  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
  *   by an HCA.
  * @pd: The protection domain associated assigned to the registered region.
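On the consumer side, ULPs call the ib_dma_* wrappers added above instead of the dma_* API directly, so the same code works whether or not the device overrides dma_ops. A minimal sketch of posting a receive buffer, assuming dev, qp, dma_mr (from ib_get_dma_mr(), per the note in the diff), buf, and len already exist; post_buffer itself is an illustrative name:

static int post_buffer(struct ib_device *dev, struct ib_qp *qp,
		       struct ib_mr *dma_mr, void *buf, size_t len)
{
	struct ib_sge sge;
	struct ib_recv_wr wr, *bad_wr;
	u64 dma_addr;

	/* map through the wrapper, never through dma_map_single() directly */
	dma_addr = ib_dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	sge.addr   = dma_addr;
	sge.length = len;
	sge.lkey   = dma_mr->lkey;

	memset(&wr, 0, sizeof(wr));
	wr.sg_list = &sge;
	wr.num_sge = 1;

	if (ib_post_recv(qp, &wr, &bad_wr)) {
		ib_dma_unmap_single(dev, dma_addr, len, DMA_FROM_DEVICE);
		return -EIO;
	}
	return 0;
}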
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee5..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
 	int num_paths;
 };
 
+struct rdma_conn_param {
+	const void *private_data;
+	u8 private_data_len;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 flow_control;
+	u8 retry_count;		/* ignored when accepting */
+	u8 rnr_retry_count;
+	/* Fields below ignored if a QP is created on the rdma_cm_id. */
+	u8 srq;
+	u32 qp_num;
+};
+
+struct rdma_ud_param {
+	const void *private_data;
+	u8 private_data_len;
+	struct ib_ah_attr ah_attr;
+	u32 qp_num;
+	u32 qkey;
+};
+
 struct rdma_cm_event {
 	enum rdma_cm_event_type	 event;
 	int			 status;
-	void			*private_data;
-	u8			 private_data_len;
+	union {
+		struct rdma_conn_param	conn;
+		struct rdma_ud_param	ud;
+	} param;
 };
 
 struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		      int *qp_attr_mask);
 
-struct rdma_conn_param {
-	const void *private_data;
-	u8 private_data_len;
-	u8 responder_resources;
-	u8 initiator_depth;
-	u8 flow_control;
-	u8 retry_count;		/* ignored when accepting */
-	u8 rnr_retry_count;
-	/* Fields below ignored if a QP is created on the rdma_cm_id. */
-	u8 srq;
-	u32 qp_num;
-	enum ib_qp_type qp_type;
-};
-
 /**
  * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
  *
  * Users must have resolved a route for the rdma_cm_id to connect with
  * by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's. The actual operation is
+ * based on the rdma_cm_id's port space.
  */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 /**
+ * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
+ *   occurred on the connection.
+ * @id: Connection identifier to transition to established.
+ * @event: Asynchronous event.
+ *
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events. Events that should be reported to the CM and
+ * when to report them are:
+ *
+ * IB_EVENT_COMM_EST - Used when a message is received on a connected
+ *    QP before an RTU has been received.
+ */
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
+
+/**
  * rdma_reject - Called to reject a connection request or response.
  */
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
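Taken together, the rdma_cm.h changes move rdma_conn_param ahead of rdma_cm_event (which now embeds it in a union), document rdma_connect(), and add rdma_notify(). A hedged sketch of the active side, assuming address and route resolution already completed and a QP created on the id with rdma_create_qp(); the parameter values are illustrative, not recommendations:

static int start_connect(struct rdma_cm_id *id)
{
	struct rdma_conn_param param;

	memset(&param, 0, sizeof(param));
	param.responder_resources = 1;
	param.initiator_depth	  = 1;
	param.retry_count	  = 7;	/* ignored when accepting */
	param.rnr_retry_count	  = 7;
	/* srq and qp_num stay zero: a QP exists on the rdma_cm_id */

	return rdma_connect(id, &param);	/* result arrives as an rdma_cm event */
}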
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
 int rdma_set_ib_paths(struct rdma_cm_id *id,
 		      struct ib_sa_path_rec *path_rec, int num_paths);
 
+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
 #endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 000000000000..9572ab8eeac1
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_CM_H
+#define RDMA_USER_CM_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_user_sa.h>
+
+#define RDMA_USER_CM_ABI_VERSION 3
+
+#define RDMA_MAX_PRIVATE_DATA	 256
+
+enum {
+	RDMA_USER_CM_CMD_CREATE_ID,
+	RDMA_USER_CM_CMD_DESTROY_ID,
+	RDMA_USER_CM_CMD_BIND_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
+	RDMA_USER_CM_CMD_QUERY_ROUTE,
+	RDMA_USER_CM_CMD_CONNECT,
+	RDMA_USER_CM_CMD_LISTEN,
+	RDMA_USER_CM_CMD_ACCEPT,
+	RDMA_USER_CM_CMD_REJECT,
+	RDMA_USER_CM_CMD_DISCONNECT,
+	RDMA_USER_CM_CMD_INIT_QP_ATTR,
+	RDMA_USER_CM_CMD_GET_EVENT,
+	RDMA_USER_CM_CMD_GET_OPTION,
+	RDMA_USER_CM_CMD_SET_OPTION,
+	RDMA_USER_CM_CMD_NOTIFY
+};
+
+/*
+ * command ABI structures.
+ */
+struct rdma_ucm_cmd_hdr {
+	__u32 cmd;
+	__u16 in;
+	__u16 out;
+};
+
+struct rdma_ucm_create_id {
+	__u64 uid;
+	__u64 response;
+	__u16 ps;
+	__u8  reserved[6];
+};
+
+struct rdma_ucm_create_id_resp {
+	__u32 id;
+};
+
+struct rdma_ucm_destroy_id {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_destroy_id_resp {
+	__u32 events_reported;
+};
+
+struct rdma_ucm_bind_addr {
+	__u64 response;
+	struct sockaddr_in6 addr;
+	__u32 id;
+};
+
+struct rdma_ucm_resolve_addr {
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_resolve_route {
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_query_route {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_query_route_resp {
+	__u64 node_guid;
+	struct ib_user_path_rec ib_route[2];
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 num_paths;
+	__u8 port_num;
+	__u8 reserved[3];
+};
+
+struct rdma_ucm_conn_param {
+	__u32 qp_num;
+	__u32 reserved;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  srq;
+	__u8  responder_resources;
+	__u8  initiator_depth;
+	__u8  flow_control;
+	__u8  retry_count;
+	__u8  rnr_retry_count;
+	__u8  valid;
+};
+
+struct rdma_ucm_ud_param {
+	__u32 qp_num;
+	__u32 qkey;
+	struct ib_uverbs_ah_attr ah_attr;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  reserved[7];
+};
+
+struct rdma_ucm_connect {
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_listen {
+	__u32 id;
+	__u32 backlog;
+};
+
+struct rdma_ucm_accept {
+	__u64 uid;
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_reject {
+	__u32 id;
+	__u8  private_data_len;
+	__u8  reserved[3];
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_disconnect {
+	__u32 id;
+};
+
+struct rdma_ucm_init_qp_attr {
+	__u64 response;
+	__u32 id;
+	__u32 qp_state;
+};
+
+struct rdma_ucm_notify {
+	__u32 id;
+	__u32 event;
+};
+
+struct rdma_ucm_get_event {
+	__u64 response;
+};
+
+struct rdma_ucm_event_resp {
+	__u64 uid;
+	__u32 id;
+	__u32 event;
+	__u32 status;
+	union {
+		struct rdma_ucm_conn_param conn;
+		struct rdma_ucm_ud_param   ud;
+	} param;
+};
+
+#endif /* RDMA_USER_CM_H */
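For reference, the write-based ABI this new header defines frames every request as an rdma_ucm_cmd_hdr immediately followed by the command payload, with in/out giving the payload and response-buffer sizes. A hedged userspace sketch of issuing CREATE_ID; the device path, the ps value, and the error handling are assumptions of this example, not part of the header:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

int main(void)
{
	struct {
		struct rdma_ucm_cmd_hdr   hdr;
		struct rdma_ucm_create_id cmd;
	} req;
	struct rdma_ucm_create_id_resp resp;
	int fd = open("/dev/infiniband/rdma_cm", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.hdr.cmd	 = RDMA_USER_CM_CMD_CREATE_ID;
	req.hdr.in	 = sizeof(req.cmd);	/* payload bytes after the header */
	req.hdr.out	 = sizeof(resp);	/* size of the response buffer */
	req.cmd.uid	 = 1;			/* opaque user context echoed in events */
	req.cmd.response = (__u64) (unsigned long) &resp;
	req.cmd.ps	 = 0x0106;		/* assumed value of RDMA_PS_TCP */

	if (write(fd, &req, sizeof(req)) != sizeof(req))	/* kernel fills resp */
		return 1;
	printf("created id %u\n", resp.id);
	close(fd);
	return 0;
}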