author		Linus Torvalds <torvalds@woody.osdl.org>	2006-12-12 21:52:31 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-12 21:52:31 -0500
commit		b1ef951e8199d1c59f14dbe0fa22974ed57a3b48 (patch)
tree		86a16643358339c23e3d7a9e608fcc90a18d2c84 /include
parent		775ba7ad491a154f99871fe603f03366e84ae159 (diff)
parent		82b399133b6ebf667ee635fc69ef26b61eede4bc (diff)
Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Make sure struct ipoib_neigh.queue is always initialized
  IB/iser: Use the new verbs DMA mapping functions
  IB/srp: Use new verbs IB DMA mapping functions
  IPoIB: Use the new verbs DMA mapping functions
  IB/core: Use the new verbs DMA mapping functions
  IB/ipath: Implement new verbs DMA mapping functions
  IB: Add DMA mapping functions to allow device drivers to interpose
  RDMA/cma: Export rdma cm interface to userspace
  RDMA/cma: Add support for RDMA_PS_UDP
  RDMA/cma: Allow early transition to RTS to handle lost CM messages
  RDMA/cma: Report connect info with connect events
  RDMA/cma: Remove unneeded qp_type parameter from rdma_cm
  IB/ipath: Fix IRQ for PCI Express HCAs
  RDMA/amso1100: Fix memory leak in c2_qp_modify()
  IB/iser: Remove unused "write-only" variables
  IB/ipath: Remove unused "write-only" variables
  IB/fmr: ib_flush_fmr_pool() may wait too long
Diffstat (limited to 'include')
-rw-r--r--	include/rdma/ib_marshall.h	|   5
-rw-r--r--	include/rdma/ib_verbs.h		| 253
-rw-r--r--	include/rdma/rdma_cm.h		|  62
-rw-r--r--	include/rdma/rdma_cm_ib.h	|   3
-rw-r--r--	include/rdma/rdma_user_cm.h	| 206
5 files changed, 512 insertions, 17 deletions
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index 66bf4d7d0dfb..db037205c9e8 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -41,6 +41,9 @@
 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 			     struct ib_qp_attr *src);
 
+void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
+			     struct ib_ah_attr *src);
+
 void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
 			      struct ib_sa_path_rec *src);
 
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8eacc3510993..fd2353fa7e12 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -43,6 +43,8 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
 
 #include <asm/atomic.h>
 #include <asm/scatterlist.h>
@@ -848,6 +850,49 @@ struct ib_cache {
 	u8 *lmc_cache;
 };
 
+struct ib_dma_mapping_ops {
+	int		(*mapping_error)(struct ib_device *dev,
+					 u64 dma_addr);
+	u64		(*map_single)(struct ib_device *dev,
+				      void *ptr, size_t size,
+				      enum dma_data_direction direction);
+	void		(*unmap_single)(struct ib_device *dev,
+					u64 addr, size_t size,
+					enum dma_data_direction direction);
+	u64		(*map_page)(struct ib_device *dev,
+				    struct page *page, unsigned long offset,
+				    size_t size,
+				    enum dma_data_direction direction);
+	void		(*unmap_page)(struct ib_device *dev,
+				      u64 addr, size_t size,
+				      enum dma_data_direction direction);
+	int		(*map_sg)(struct ib_device *dev,
+				  struct scatterlist *sg, int nents,
+				  enum dma_data_direction direction);
+	void		(*unmap_sg)(struct ib_device *dev,
+				    struct scatterlist *sg, int nents,
+				    enum dma_data_direction direction);
+	u64		(*dma_address)(struct ib_device *dev,
+				       struct scatterlist *sg);
+	unsigned int	(*dma_len)(struct ib_device *dev,
+				   struct scatterlist *sg);
+	void		(*sync_single_for_cpu)(struct ib_device *dev,
+					       u64 dma_handle,
+					       size_t size,
+					       enum dma_data_direction dir);
+	void		(*sync_single_for_device)(struct ib_device *dev,
+						  u64 dma_handle,
+						  size_t size,
+						  enum dma_data_direction dir);
+	void		*(*alloc_coherent)(struct ib_device *dev,
+					   size_t size,
+					   u64 *dma_handle,
+					   gfp_t flag);
+	void		(*free_coherent)(struct ib_device *dev,
+					 size_t size, void *cpu_addr,
+					 u64 dma_handle);
+};
+
 struct iw_cm_verbs;
 
 struct ib_device {
@@ -992,6 +1037,8 @@ struct ib_device {
 						struct ib_mad *in_mad,
 						struct ib_mad *out_mad);
 
+	struct ib_dma_mapping_ops   *dma_ops;
+
 	struct module               *owner;
 	struct class_device          class_dev;
 	struct kobject               ports_parent;
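
The interposition point is the pair just added above: a driver that does not do DMA in the usual sense can supply its own ib_dma_mapping_ops and point dev->dma_ops at it, which is what the IB/ipath patch in this merge does. A minimal sketch of the idea, assuming a device that works directly on kernel virtual addresses; the my_* names are hypothetical and only two of the callbacks are shown, a real driver must fill in every member:

static u64 my_map_single(struct ib_device *dev, void *ptr, size_t size,
			 enum dma_data_direction direction)
{
	/* No IOMMU or bus mapping: hand back the kernel virtual address. */
	return (u64) (unsigned long) ptr;
}

static void my_unmap_single(struct ib_device *dev, u64 addr, size_t size,
			    enum dma_data_direction direction)
{
	/* Nothing to tear down for a virtual-address "mapping". */
}

static struct ib_dma_mapping_ops my_dma_mapping_ops = {
	.map_single	= my_map_single,
	.unmap_single	= my_unmap_single,
	/* ... the remaining callbacks must be filled in as well ... */
};

static void my_setup(struct ib_device *dev)
{
	/* Interpose before registering the device with the IB core. */
	dev->dma_ops = &my_dma_mapping_ops;
}

Devices with ordinary DMA hardware simply leave dma_ops NULL and fall through to the standard dma_*() API, as the wrappers below show.
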
@@ -1395,10 +1442,216 @@ static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
  * usable for DMA.
  * @pd: The protection domain associated with the memory region.
  * @mr_access_flags: Specifies the memory access rights.
+ *
+ * Note that the ib_dma_*() functions defined below must be used
+ * to create/destroy addresses used with the Lkey or Rkey returned
+ * by ib_get_dma_mr().
  */
 struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
 
 /**
+ * ib_dma_mapping_error - check a DMA addr for error
+ * @dev: The device for which the dma_addr was created
+ * @dma_addr: The DMA address to check
+ */
+static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->mapping_error(dev, dma_addr) :
+		dma_mapping_error(dma_addr);
+}
+
+/**
+ * ib_dma_map_single - Map a kernel virtual address to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @cpu_addr: The kernel virtual address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_single(struct ib_device *dev,
+				    void *cpu_addr, size_t size,
+				    enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_single(dev, cpu_addr, size, direction) :
+		dma_map_single(dev->dma_device, cpu_addr, size, direction);
+}
+
+/**
+ * ib_dma_unmap_single - Destroy a mapping created by ib_dma_map_single()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_single(struct ib_device *dev,
+				       u64 addr, size_t size,
+				       enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_single(dev, addr, size, direction) :
+		dma_unmap_single(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_page - Map a physical page to DMA address
+ * @dev: The device for which the dma_addr is to be created
+ * @page: The page to be mapped
+ * @offset: The offset within the page
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline u64 ib_dma_map_page(struct ib_device *dev,
+				  struct page *page,
+				  unsigned long offset,
+				  size_t size,
+				  enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_page(dev, page, offset, size, direction) :
+		dma_map_page(dev->dma_device, page, offset, size, direction);
+}
+
+/**
+ * ib_dma_unmap_page - Destroy a mapping created by ib_dma_map_page()
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_page(struct ib_device *dev,
+				     u64 addr, size_t size,
+				     enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_page(dev, addr, size, direction) :
+		dma_unmap_page(dev->dma_device, addr, size, direction);
+}
+
+/**
+ * ib_dma_map_sg - Map a scatter/gather list to DMA addresses
+ * @dev: The device for which the DMA addresses are to be created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline int ib_dma_map_sg(struct ib_device *dev,
+				struct scatterlist *sg, int nents,
+				enum dma_data_direction direction)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->map_sg(dev, sg, nents, direction) :
+		dma_map_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_dma_unmap_sg - Unmap a scatter/gather list of DMA addresses
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The array of scatter/gather entries
+ * @nents: The number of scatter/gather entries
+ * @direction: The direction of the DMA
+ */
+static inline void ib_dma_unmap_sg(struct ib_device *dev,
+				   struct scatterlist *sg, int nents,
+				   enum dma_data_direction direction)
+{
+	dev->dma_ops ?
+		dev->dma_ops->unmap_sg(dev, sg, nents, direction) :
+		dma_unmap_sg(dev->dma_device, sg, nents, direction);
+}
+
+/**
+ * ib_sg_dma_address - Return the DMA address from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline u64 ib_sg_dma_address(struct ib_device *dev,
+				    struct scatterlist *sg)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->dma_address(dev, sg) : sg_dma_address(sg);
+}
+
+/**
+ * ib_sg_dma_len - Return the DMA length from a scatter/gather entry
+ * @dev: The device for which the DMA addresses were created
+ * @sg: The scatter/gather entry
+ */
+static inline unsigned int ib_sg_dma_len(struct ib_device *dev,
+					 struct scatterlist *sg)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->dma_len(dev, sg) : sg_dma_len(sg);
+}
+
+/**
+ * ib_dma_sync_single_for_cpu - Prepare DMA region to be accessed by CPU
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_cpu(struct ib_device *dev,
+					      u64 addr,
+					      size_t size,
+					      enum dma_data_direction dir)
+{
+	dev->dma_ops ?
+		dev->dma_ops->sync_single_for_cpu(dev, addr, size, dir) :
+		dma_sync_single_for_cpu(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_sync_single_for_device - Prepare DMA region to be accessed by device
+ * @dev: The device for which the DMA address was created
+ * @addr: The DMA address
+ * @size: The size of the region in bytes
+ * @dir: The direction of the DMA
+ */
+static inline void ib_dma_sync_single_for_device(struct ib_device *dev,
+						 u64 addr,
+						 size_t size,
+						 enum dma_data_direction dir)
+{
+	dev->dma_ops ?
+		dev->dma_ops->sync_single_for_device(dev, addr, size, dir) :
+		dma_sync_single_for_device(dev->dma_device, addr, size, dir);
+}
+
+/**
+ * ib_dma_alloc_coherent - Allocate memory and map it for DMA
+ * @dev: The device for which the DMA address is requested
+ * @size: The size of the region to allocate in bytes
+ * @dma_handle: A pointer for returning the DMA address of the region
+ * @flag: memory allocator flags
+ */
+static inline void *ib_dma_alloc_coherent(struct ib_device *dev,
+					  size_t size,
+					  u64 *dma_handle,
+					  gfp_t flag)
+{
+	return dev->dma_ops ?
+		dev->dma_ops->alloc_coherent(dev, size, dma_handle, flag) :
+		dma_alloc_coherent(dev->dma_device, size, dma_handle, flag);
+}
+
+/**
+ * ib_dma_free_coherent - Free memory allocated by ib_dma_alloc_coherent()
+ * @dev: The device for which the DMA addresses were allocated
+ * @size: The size of the region
+ * @cpu_addr: the address returned by ib_dma_alloc_coherent()
+ * @dma_handle: the DMA address returned by ib_dma_alloc_coherent()
+ */
+static inline void ib_dma_free_coherent(struct ib_device *dev,
+					size_t size, void *cpu_addr,
+					u64 dma_handle)
+{
+	dev->dma_ops ?
+		dev->dma_ops->free_coherent(dev, size, cpu_addr, dma_handle) :
+		dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
+}
+
+/**
  * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
  *   by an HCA.
  * @pd: The protection domain associated assigned to the registered region.
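
On the consumer side, the ULP conversions in this merge (IPoIB, SRP, iser) amount to replacing direct dma_map_single(dev->dma_device, ...) calls with these wrappers. An illustrative sketch of the pattern; my_send, ca, buf and len are hypothetical names, not code from this merge:

static int my_send(struct ib_device *ca, void *buf, size_t len)
{
	u64 addr;

	addr = ib_dma_map_single(ca, buf, len, DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ca, addr))
		return -EIO;		/* mapping failed */

	/* ... post a send work request carrying 'addr' and the lkey of
	 * the MR from ib_get_dma_mr(), then reap its completion ... */

	ib_dma_unmap_single(ca, addr, len, DMA_TO_DEVICE);
	return 0;
}
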
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index deb5a0a4cee5..36cd8a8526a0 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -77,11 +77,34 @@ struct rdma_route {
 	int num_paths;
 };
 
+struct rdma_conn_param {
+	const void *private_data;
+	u8 private_data_len;
+	u8 responder_resources;
+	u8 initiator_depth;
+	u8 flow_control;
+	u8 retry_count;		/* ignored when accepting */
+	u8 rnr_retry_count;
+	/* Fields below ignored if a QP is created on the rdma_cm_id. */
+	u8 srq;
+	u32 qp_num;
+};
+
+struct rdma_ud_param {
+	const void *private_data;
+	u8 private_data_len;
+	struct ib_ah_attr ah_attr;
+	u32 qp_num;
+	u32 qkey;
+};
+
 struct rdma_cm_event {
 	enum rdma_cm_event_type event;
 	int status;
-	void *private_data;
-	u8 private_data_len;
+	union {
+		struct rdma_conn_param conn;
+		struct rdma_ud_param ud;
+	} param;
 };
 
 struct rdma_cm_id;
@@ -204,25 +227,17 @@ void rdma_destroy_qp(struct rdma_cm_id *id);
 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 		      int *qp_attr_mask);
 
-struct rdma_conn_param {
-	const void *private_data;
-	u8 private_data_len;
-	u8 responder_resources;
-	u8 initiator_depth;
-	u8 flow_control;
-	u8 retry_count;		/* ignored when accepting */
-	u8 rnr_retry_count;
-	/* Fields below ignored if a QP is created on the rdma_cm_id. */
-	u8 srq;
-	u32 qp_num;
-	enum ib_qp_type qp_type;
-};
-
 /**
  * rdma_connect - Initiate an active connection request.
+ * @id: Connection identifier to connect.
+ * @conn_param: Connection information used for connected QPs.
  *
  * Users must have resolved a route for the rdma_cm_id to connect with
  * by having called rdma_resolve_route before calling this routine.
+ *
+ * This call will either connect to a remote QP or obtain remote QP
+ * information for unconnected rdma_cm_id's. The actual operation is
+ * based on the rdma_cm_id's port space.
  */
 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
@@ -253,6 +268,21 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
 
 /**
+ * rdma_notify - Notifies the RDMA CM of an asynchronous event that has
+ *   occurred on the connection.
+ * @id: Connection identifier to transition to established.
+ * @event: Asynchronous event.
+ *
+ * This routine should be invoked by users to notify the CM of relevant
+ * communication events. Events that should be reported to the CM and
+ * when to report them are:
+ *
+ *   IB_EVENT_COMM_EST - Used when a message is received on a connected
+ *      QP before an RTU has been received.
+ */
+int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
+
+/**
  * rdma_reject - Called to reject a connection request or response.
  */
 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
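
Taken together, these changes mean event handlers now read connect data out of the new param union, selected by the id's port space: param.conn for connected (RDMA_PS_TCP) ids and param.ud for RDMA_PS_UDP ids. A handler sketch under those assumptions; my_handler, handle_req and setup_dgram are hypothetical names:

static int my_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_CONNECT_REQUEST:
		/* Private data moved from the event into the union. */
		handle_req(event->param.conn.private_data,
			   event->param.conn.private_data_len);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		if (id->ps == RDMA_PS_UDP)
			/* UD: remote QPN, Q_Key and AH attributes. */
			setup_dgram(event->param.ud.qp_num,
				    event->param.ud.qkey,
				    &event->param.ud.ah_attr);
		break;
	default:
		break;
	}
	return 0;
}
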
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index e8c3af1804d4..9b176df1d667 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -44,4 +44,7 @@
 int rdma_set_ib_paths(struct rdma_cm_id *id,
 		      struct ib_sa_path_rec *path_rec, int num_paths);
 
+/* Global qkey for UD QPs and multicast groups. */
+#define RDMA_UD_QKEY 0x01234567
+
 #endif /* RDMA_CM_IB_H */
diff --git a/include/rdma/rdma_user_cm.h b/include/rdma/rdma_user_cm.h
new file mode 100644
index 000000000000..9572ab8eeac1
--- /dev/null
+++ b/include/rdma/rdma_user_cm.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef RDMA_USER_CM_H
+#define RDMA_USER_CM_H
+
+#include <linux/types.h>
+#include <linux/in6.h>
+#include <rdma/ib_user_verbs.h>
+#include <rdma/ib_user_sa.h>
+
+#define RDMA_USER_CM_ABI_VERSION	3
+
+#define RDMA_MAX_PRIVATE_DATA		256
+
+enum {
+	RDMA_USER_CM_CMD_CREATE_ID,
+	RDMA_USER_CM_CMD_DESTROY_ID,
+	RDMA_USER_CM_CMD_BIND_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ADDR,
+	RDMA_USER_CM_CMD_RESOLVE_ROUTE,
+	RDMA_USER_CM_CMD_QUERY_ROUTE,
+	RDMA_USER_CM_CMD_CONNECT,
+	RDMA_USER_CM_CMD_LISTEN,
+	RDMA_USER_CM_CMD_ACCEPT,
+	RDMA_USER_CM_CMD_REJECT,
+	RDMA_USER_CM_CMD_DISCONNECT,
+	RDMA_USER_CM_CMD_INIT_QP_ATTR,
+	RDMA_USER_CM_CMD_GET_EVENT,
+	RDMA_USER_CM_CMD_GET_OPTION,
+	RDMA_USER_CM_CMD_SET_OPTION,
+	RDMA_USER_CM_CMD_NOTIFY
+};
+
+/*
+ * command ABI structures.
+ */
+struct rdma_ucm_cmd_hdr {
+	__u32 cmd;
+	__u16 in;
+	__u16 out;
+};
+
+struct rdma_ucm_create_id {
+	__u64 uid;
+	__u64 response;
+	__u16 ps;
+	__u8  reserved[6];
+};
+
+struct rdma_ucm_create_id_resp {
+	__u32 id;
+};
+
+struct rdma_ucm_destroy_id {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_destroy_id_resp {
+	__u32 events_reported;
+};
+
+struct rdma_ucm_bind_addr {
+	__u64 response;
+	struct sockaddr_in6 addr;
+	__u32 id;
+};
+
+struct rdma_ucm_resolve_addr {
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_resolve_route {
+	__u32 id;
+	__u32 timeout_ms;
+};
+
+struct rdma_ucm_query_route {
+	__u64 response;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_query_route_resp {
+	__u64 node_guid;
+	struct ib_user_path_rec ib_route[2];
+	struct sockaddr_in6 src_addr;
+	struct sockaddr_in6 dst_addr;
+	__u32 num_paths;
+	__u8 port_num;
+	__u8 reserved[3];
+};
+
+struct rdma_ucm_conn_param {
+	__u32 qp_num;
+	__u32 reserved;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  srq;
+	__u8  responder_resources;
+	__u8  initiator_depth;
+	__u8  flow_control;
+	__u8  retry_count;
+	__u8  rnr_retry_count;
+	__u8  valid;
+};
+
+struct rdma_ucm_ud_param {
+	__u32 qp_num;
+	__u32 qkey;
+	struct ib_uverbs_ah_attr ah_attr;
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+	__u8  private_data_len;
+	__u8  reserved[7];
+};
+
+struct rdma_ucm_connect {
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_listen {
+	__u32 id;
+	__u32 backlog;
+};
+
+struct rdma_ucm_accept {
+	__u64 uid;
+	struct rdma_ucm_conn_param conn_param;
+	__u32 id;
+	__u32 reserved;
+};
+
+struct rdma_ucm_reject {
+	__u32 id;
+	__u8  private_data_len;
+	__u8  reserved[3];
+	__u8  private_data[RDMA_MAX_PRIVATE_DATA];
+};
+
+struct rdma_ucm_disconnect {
+	__u32 id;
+};
+
+struct rdma_ucm_init_qp_attr {
+	__u64 response;
+	__u32 id;
+	__u32 qp_state;
+};
+
+struct rdma_ucm_notify {
+	__u32 id;
+	__u32 event;
+};
+
+struct rdma_ucm_get_event {
+	__u64 response;
+};
+
+struct rdma_ucm_event_resp {
+	__u64 uid;
+	__u32 id;
+	__u32 event;
+	__u32 status;
+	union {
+		struct rdma_ucm_conn_param conn;
+		struct rdma_ucm_ud_param   ud;
+	} param;
+};
+
+#endif /* RDMA_USER_CM_H */
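
For reference, a userspace sketch of driving this write() ABI directly; normally librdmacm hides it. Assumptions beyond this header: the rdma_ucm module exposes its character device as /dev/infiniband/rdma_cm, and 'ps' carries a port-space value matching the kernel's enum rdma_port_space.

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <rdma/rdma_user_cm.h>

static int create_cm_id(int fd, uint16_t ps, uint32_t *id)
{
	struct {
		struct rdma_ucm_cmd_hdr    hdr;
		struct rdma_ucm_create_id  cmd;
	} msg;
	struct rdma_ucm_create_id_resp resp;

	memset(&msg, 0, sizeof msg);
	msg.hdr.cmd      = RDMA_USER_CM_CMD_CREATE_ID;
	msg.hdr.in       = sizeof msg.cmd;	/* command bytes after the header */
	msg.hdr.out      = sizeof resp;		/* expected response size */
	msg.cmd.uid      = (uintptr_t) id;	/* opaque user context */
	msg.cmd.response = (uintptr_t) &resp;	/* kernel writes the resp here */
	msg.cmd.ps       = ps;

	if (write(fd, &msg, sizeof msg) != sizeof msg)
		return -1;

	*id = resp.id;				/* kernel-assigned id handle */
	return 0;
}

The same shape applies to every command above: a rdma_ucm_cmd_hdr followed by the command struct, with any *_resp structure returned through the user address passed in the command's response field.
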