aboutsummaryrefslogtreecommitdiffstats
path: root/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2005-08-29 13:36:48 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2005-08-29 13:36:48 -0400
commita78b3371b628559eb5c46ee1518df27c62f3e801 (patch)
treedd32333307ce6a7e4d39ea8c07c34bc3dc5540a1 /include
parent97c169a21bfb5bb2ab2bccd852da4f0d0e021c55 (diff)
parenta4d61e84804f3b14cc35c5e2af768a07c0f64ef6 (diff)
Merge HEAD from master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband.git
Diffstat (limited to 'include')
-rw-r--r--include/rdma/ib_cache.h105
-rw-r--r--include/rdma/ib_cm.h568
-rw-r--r--include/rdma/ib_fmr_pool.h93
-rw-r--r--include/rdma/ib_mad.h579
-rw-r--r--include/rdma/ib_pack.h245
-rw-r--r--include/rdma/ib_sa.h373
-rw-r--r--include/rdma/ib_smi.h94
-rw-r--r--include/rdma/ib_user_cm.h328
-rw-r--r--include/rdma/ib_user_mad.h137
-rw-r--r--include/rdma/ib_user_verbs.h422
-rw-r--r--include/rdma/ib_verbs.h1461
11 files changed, 4405 insertions, 0 deletions
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
new file mode 100644
index 000000000000..5bf9834f7dca
--- /dev/null
+++ b/include/rdma/ib_cache.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Intel Corporation. All rights reserved.
4 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 *
34 * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
35 */
36
37#ifndef _IB_CACHE_H
38#define _IB_CACHE_H
39
40#include <rdma/ib_verbs.h>
41
42/**
43 * ib_get_cached_gid - Returns a cached GID table entry
44 * @device: The device to query.
45 * @port_num: The port number of the device to query.
46 * @index: The index into the cached GID table to query.
47 * @gid: The GID value found at the specified index.
48 *
49 * ib_get_cached_gid() fetches the specified GID table entry stored in
50 * the local software cache.
51 */
52int ib_get_cached_gid(struct ib_device *device,
53 u8 port_num,
54 int index,
55 union ib_gid *gid);
56
57/**
58 * ib_find_cached_gid - Returns the port number and GID table index where
59 * a specified GID value occurs.
60 * @device: The device to query.
61 * @gid: The GID value to search for.
62 * @port_num: The port number of the device where the GID value was found.
63 * @index: The index into the cached GID table where the GID was found. This
64 * parameter may be NULL.
65 *
66 * ib_find_cached_gid() searches for the specified GID value in
67 * the local software cache.
68 */
69int ib_find_cached_gid(struct ib_device *device,
70 union ib_gid *gid,
71 u8 *port_num,
72 u16 *index);
73
74/**
75 * ib_get_cached_pkey - Returns a cached PKey table entry
 76 * @device_handle: The device to query.
77 * @port_num: The port number of the device to query.
78 * @index: The index into the cached PKey table to query.
79 * @pkey: The PKey value found at the specified index.
80 *
81 * ib_get_cached_pkey() fetches the specified PKey table entry stored in
82 * the local software cache.
83 */
84int ib_get_cached_pkey(struct ib_device *device_handle,
85 u8 port_num,
86 int index,
87 u16 *pkey);
88
89/**
90 * ib_find_cached_pkey - Returns the PKey table index where a specified
91 * PKey value occurs.
92 * @device: The device to query.
93 * @port_num: The port number of the device to search for the PKey.
94 * @pkey: The PKey value to search for.
95 * @index: The index into the cached PKey table where the PKey was found.
96 *
97 * ib_find_cached_pkey() searches the specified PKey table in
98 * the local software cache.
99 */
100int ib_find_cached_pkey(struct ib_device *device,
101 u8 port_num,
102 u16 pkey,
103 u16 *index);
104
105#endif /* _IB_CACHE_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
new file mode 100644
index 000000000000..77fe9039209b
--- /dev/null
+++ b/include/rdma/ib_cm.h
@@ -0,0 +1,568 @@
1/*
2 * Copyright (c) 2004 Intel Corporation. All rights reserved.
3 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
4 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
5 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
6 *
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
12 *
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
15 * conditions are met:
16 *
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer.
20 *
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
25 *
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 * SOFTWARE.
34 *
35 * $Id: ib_cm.h 2730 2005-06-28 16:43:03Z sean.hefty $
36 */
37#if !defined(IB_CM_H)
38#define IB_CM_H
39
40#include <rdma/ib_mad.h>
41#include <rdma/ib_sa.h>
42
43enum ib_cm_state {
44 IB_CM_IDLE,
45 IB_CM_LISTEN,
46 IB_CM_REQ_SENT,
47 IB_CM_REQ_RCVD,
48 IB_CM_MRA_REQ_SENT,
49 IB_CM_MRA_REQ_RCVD,
50 IB_CM_REP_SENT,
51 IB_CM_REP_RCVD,
52 IB_CM_MRA_REP_SENT,
53 IB_CM_MRA_REP_RCVD,
54 IB_CM_ESTABLISHED,
55 IB_CM_DREQ_SENT,
56 IB_CM_DREQ_RCVD,
57 IB_CM_TIMEWAIT,
58 IB_CM_SIDR_REQ_SENT,
59 IB_CM_SIDR_REQ_RCVD
60};
61
62enum ib_cm_lap_state {
63 IB_CM_LAP_IDLE,
64 IB_CM_LAP_SENT,
65 IB_CM_LAP_RCVD,
66 IB_CM_MRA_LAP_SENT,
67 IB_CM_MRA_LAP_RCVD,
68};
69
70enum ib_cm_event_type {
71 IB_CM_REQ_ERROR,
72 IB_CM_REQ_RECEIVED,
73 IB_CM_REP_ERROR,
74 IB_CM_REP_RECEIVED,
75 IB_CM_RTU_RECEIVED,
76 IB_CM_USER_ESTABLISHED,
77 IB_CM_DREQ_ERROR,
78 IB_CM_DREQ_RECEIVED,
79 IB_CM_DREP_RECEIVED,
80 IB_CM_TIMEWAIT_EXIT,
81 IB_CM_MRA_RECEIVED,
82 IB_CM_REJ_RECEIVED,
83 IB_CM_LAP_ERROR,
84 IB_CM_LAP_RECEIVED,
85 IB_CM_APR_RECEIVED,
86 IB_CM_SIDR_REQ_ERROR,
87 IB_CM_SIDR_REQ_RECEIVED,
88 IB_CM_SIDR_REP_RECEIVED
89};
90
91enum ib_cm_data_size {
92 IB_CM_REQ_PRIVATE_DATA_SIZE = 92,
93 IB_CM_MRA_PRIVATE_DATA_SIZE = 222,
94 IB_CM_REJ_PRIVATE_DATA_SIZE = 148,
95 IB_CM_REP_PRIVATE_DATA_SIZE = 196,
96 IB_CM_RTU_PRIVATE_DATA_SIZE = 224,
97 IB_CM_DREQ_PRIVATE_DATA_SIZE = 220,
98 IB_CM_DREP_PRIVATE_DATA_SIZE = 224,
99 IB_CM_REJ_ARI_LENGTH = 72,
100 IB_CM_LAP_PRIVATE_DATA_SIZE = 168,
101 IB_CM_APR_PRIVATE_DATA_SIZE = 148,
102 IB_CM_APR_INFO_LENGTH = 72,
103 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
104 IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
105 IB_CM_SIDR_REP_INFO_LENGTH = 72
106};
107
108struct ib_cm_id;
109
110struct ib_cm_req_event_param {
111 struct ib_cm_id *listen_id;
112 struct ib_device *device;
113 u8 port;
114
115 struct ib_sa_path_rec *primary_path;
116 struct ib_sa_path_rec *alternate_path;
117
118 __be64 remote_ca_guid;
119 u32 remote_qkey;
120 u32 remote_qpn;
121 enum ib_qp_type qp_type;
122
123 u32 starting_psn;
124 u8 responder_resources;
125 u8 initiator_depth;
126 unsigned int local_cm_response_timeout:5;
127 unsigned int flow_control:1;
128 unsigned int remote_cm_response_timeout:5;
129 unsigned int retry_count:3;
130 unsigned int rnr_retry_count:3;
131 unsigned int srq:1;
132};
133
134struct ib_cm_rep_event_param {
135 __be64 remote_ca_guid;
136 u32 remote_qkey;
137 u32 remote_qpn;
138 u32 starting_psn;
139 u8 responder_resources;
140 u8 initiator_depth;
141 unsigned int target_ack_delay:5;
142 unsigned int failover_accepted:2;
143 unsigned int flow_control:1;
144 unsigned int rnr_retry_count:3;
145 unsigned int srq:1;
146};
147
148enum ib_cm_rej_reason {
149 IB_CM_REJ_NO_QP = 1,
150 IB_CM_REJ_NO_EEC = 2,
151 IB_CM_REJ_NO_RESOURCES = 3,
152 IB_CM_REJ_TIMEOUT = 4,
153 IB_CM_REJ_UNSUPPORTED = 5,
154 IB_CM_REJ_INVALID_COMM_ID = 6,
155 IB_CM_REJ_INVALID_COMM_INSTANCE = 7,
156 IB_CM_REJ_INVALID_SERVICE_ID = 8,
157 IB_CM_REJ_INVALID_TRANSPORT_TYPE = 9,
158 IB_CM_REJ_STALE_CONN = 10,
159 IB_CM_REJ_RDC_NOT_EXIST = 11,
160 IB_CM_REJ_INVALID_GID = 12,
161 IB_CM_REJ_INVALID_LID = 13,
162 IB_CM_REJ_INVALID_SL = 14,
163 IB_CM_REJ_INVALID_TRAFFIC_CLASS = 15,
164 IB_CM_REJ_INVALID_HOP_LIMIT = 16,
165 IB_CM_REJ_INVALID_PACKET_RATE = 17,
166 IB_CM_REJ_INVALID_ALT_GID = 18,
167 IB_CM_REJ_INVALID_ALT_LID = 19,
168 IB_CM_REJ_INVALID_ALT_SL = 20,
169 IB_CM_REJ_INVALID_ALT_TRAFFIC_CLASS = 21,
170 IB_CM_REJ_INVALID_ALT_HOP_LIMIT = 22,
171 IB_CM_REJ_INVALID_ALT_PACKET_RATE = 23,
172 IB_CM_REJ_PORT_CM_REDIRECT = 24,
173 IB_CM_REJ_PORT_REDIRECT = 25,
174 IB_CM_REJ_INVALID_MTU = 26,
175 IB_CM_REJ_INSUFFICIENT_RESP_RESOURCES = 27,
176 IB_CM_REJ_CONSUMER_DEFINED = 28,
177 IB_CM_REJ_INVALID_RNR_RETRY = 29,
178 IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
179 IB_CM_REJ_INVALID_CLASS_VERSION = 31,
180 IB_CM_REJ_INVALID_FLOW_LABEL = 32,
181 IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
182};
183
184struct ib_cm_rej_event_param {
185 enum ib_cm_rej_reason reason;
186 void *ari;
187 u8 ari_length;
188};
189
190struct ib_cm_mra_event_param {
191 u8 service_timeout;
192};
193
194struct ib_cm_lap_event_param {
195 struct ib_sa_path_rec *alternate_path;
196};
197
198enum ib_cm_apr_status {
199 IB_CM_APR_SUCCESS,
200 IB_CM_APR_INVALID_COMM_ID,
201 IB_CM_APR_UNSUPPORTED,
202 IB_CM_APR_REJECT,
203 IB_CM_APR_REDIRECT,
204 IB_CM_APR_IS_CURRENT,
205 IB_CM_APR_INVALID_QPN_EECN,
206 IB_CM_APR_INVALID_LID,
207 IB_CM_APR_INVALID_GID,
208 IB_CM_APR_INVALID_FLOW_LABEL,
209 IB_CM_APR_INVALID_TCLASS,
210 IB_CM_APR_INVALID_HOP_LIMIT,
211 IB_CM_APR_INVALID_PACKET_RATE,
212 IB_CM_APR_INVALID_SL
213};
214
215struct ib_cm_apr_event_param {
216 enum ib_cm_apr_status ap_status;
217 void *apr_info;
218 u8 info_len;
219};
220
221struct ib_cm_sidr_req_event_param {
222 struct ib_cm_id *listen_id;
223 struct ib_device *device;
224 u8 port;
225 u16 pkey;
226};
227
228enum ib_cm_sidr_status {
229 IB_SIDR_SUCCESS,
230 IB_SIDR_UNSUPPORTED,
231 IB_SIDR_REJECT,
232 IB_SIDR_NO_QP,
233 IB_SIDR_REDIRECT,
234 IB_SIDR_UNSUPPORTED_VERSION
235};
236
237struct ib_cm_sidr_rep_event_param {
238 enum ib_cm_sidr_status status;
239 u32 qkey;
240 u32 qpn;
241 void *info;
242 u8 info_len;
243
244};
245
246struct ib_cm_event {
247 enum ib_cm_event_type event;
248 union {
249 struct ib_cm_req_event_param req_rcvd;
250 struct ib_cm_rep_event_param rep_rcvd;
251 /* No data for RTU received events. */
252 struct ib_cm_rej_event_param rej_rcvd;
253 struct ib_cm_mra_event_param mra_rcvd;
254 struct ib_cm_lap_event_param lap_rcvd;
255 struct ib_cm_apr_event_param apr_rcvd;
256 /* No data for DREQ/DREP received events. */
257 struct ib_cm_sidr_req_event_param sidr_req_rcvd;
258 struct ib_cm_sidr_rep_event_param sidr_rep_rcvd;
259 enum ib_wc_status send_status;
260 } param;
261
262 void *private_data;
263};
264
265/**
266 * ib_cm_handler - User-defined callback to process communication events.
267 * @cm_id: Communication identifier associated with the reported event.
268 * @event: Information about the communication event.
269 *
270 * IB_CM_REQ_RECEIVED and IB_CM_SIDR_REQ_RECEIVED communication events
271 * generated as a result of listen requests result in the allocation of a
272 * new @cm_id. The new @cm_id is returned to the user through this callback.
273 * Clients are responsible for destroying the new @cm_id. For peer-to-peer
274 * IB_CM_REQ_RECEIVED and all other events, the returned @cm_id corresponds
275 * to a user's existing communication identifier.
276 *
277 * Users may not call ib_destroy_cm_id while in the context of this callback;
278 * however, returning a non-zero value instructs the communication manager to
279 * destroy the @cm_id after the callback completes.
280 */
281typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
282 struct ib_cm_event *event);
283
284struct ib_cm_id {
285 ib_cm_handler cm_handler;
286 void *context;
287 __be64 service_id;
288 __be64 service_mask;
289 enum ib_cm_state state; /* internal CM/debug use */
290 enum ib_cm_lap_state lap_state; /* internal CM/debug use */
291 __be32 local_id;
292 __be32 remote_id;
293};
294
295/**
296 * ib_create_cm_id - Allocate a communication identifier.
297 * @cm_handler: Callback invoked to notify the user of CM events.
298 * @context: User specified context associated with the communication
299 * identifier.
300 *
301 * Communication identifiers are used to track connection states, service
302 * ID resolution requests, and listen requests.
303 */
304struct ib_cm_id *ib_create_cm_id(ib_cm_handler cm_handler,
305 void *context);
306
307/**
308 * ib_destroy_cm_id - Destroy a connection identifier.
309 * @cm_id: Connection identifier to destroy.
310 *
311 * This call blocks until the connection identifier is destroyed.
312 */
313void ib_destroy_cm_id(struct ib_cm_id *cm_id);
314
315#define IB_SERVICE_ID_AGN_MASK __constant_cpu_to_be64(0xFF00000000000000ULL)
316#define IB_CM_ASSIGN_SERVICE_ID __constant_cpu_to_be64(0x0200000000000000ULL)
317
318/**
319 * ib_cm_listen - Initiates listening on the specified service ID for
320 * connection and service ID resolution requests.
321 * @cm_id: Connection identifier associated with the listen request.
322 * @service_id: Service identifier matched against incoming connection
323 * and service ID resolution requests. The service ID should be specified
 324 * in network-byte order. If set to IB_CM_ASSIGN_SERVICE_ID, the CM will
325 * assign a service ID to the caller.
326 * @service_mask: Mask applied to service ID used to listen across a
327 * range of service IDs. If set to 0, the service ID is matched
328 * exactly. This parameter is ignored if %service_id is set to
329 * IB_CM_ASSIGN_SERVICE_ID.
330 */
331int ib_cm_listen(struct ib_cm_id *cm_id,
332 __be64 service_id,
333 __be64 service_mask);
334
335struct ib_cm_req_param {
336 struct ib_sa_path_rec *primary_path;
337 struct ib_sa_path_rec *alternate_path;
338 __be64 service_id;
339 u32 qp_num;
340 enum ib_qp_type qp_type;
341 u32 starting_psn;
342 const void *private_data;
343 u8 private_data_len;
344 u8 peer_to_peer;
345 u8 responder_resources;
346 u8 initiator_depth;
347 u8 remote_cm_response_timeout;
348 u8 flow_control;
349 u8 local_cm_response_timeout;
350 u8 retry_count;
351 u8 rnr_retry_count;
352 u8 max_cm_retries;
353 u8 srq;
354};
355
356/**
357 * ib_send_cm_req - Sends a connection request to the remote node.
358 * @cm_id: Connection identifier that will be associated with the
359 * connection request.
360 * @param: Connection request information needed to establish the
361 * connection.
362 */
363int ib_send_cm_req(struct ib_cm_id *cm_id,
364 struct ib_cm_req_param *param);
365
366struct ib_cm_rep_param {
367 u32 qp_num;
368 u32 starting_psn;
369 const void *private_data;
370 u8 private_data_len;
371 u8 responder_resources;
372 u8 initiator_depth;
373 u8 target_ack_delay;
374 u8 failover_accepted;
375 u8 flow_control;
376 u8 rnr_retry_count;
377 u8 srq;
378};
379
380/**
381 * ib_send_cm_rep - Sends a connection reply in response to a connection
382 * request.
383 * @cm_id: Connection identifier that will be associated with the
384 * connection request.
385 * @param: Connection reply information needed to establish the
386 * connection.
387 */
388int ib_send_cm_rep(struct ib_cm_id *cm_id,
389 struct ib_cm_rep_param *param);
390
391/**
392 * ib_send_cm_rtu - Sends a connection ready to use message in response
393 * to a connection reply message.
394 * @cm_id: Connection identifier associated with the connection request.
395 * @private_data: Optional user-defined private data sent with the
396 * ready to use message.
397 * @private_data_len: Size of the private data buffer, in bytes.
398 */
399int ib_send_cm_rtu(struct ib_cm_id *cm_id,
400 const void *private_data,
401 u8 private_data_len);
402
403/**
404 * ib_send_cm_dreq - Sends a disconnection request for an existing
405 * connection.
406 * @cm_id: Connection identifier associated with the connection being
407 * released.
408 * @private_data: Optional user-defined private data sent with the
409 * disconnection request message.
410 * @private_data_len: Size of the private data buffer, in bytes.
411 */
412int ib_send_cm_dreq(struct ib_cm_id *cm_id,
413 const void *private_data,
414 u8 private_data_len);
415
416/**
417 * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
418 * @cm_id: Connection identifier associated with the connection being
419 * released.
420 * @private_data: Optional user-defined private data sent with the
421 * disconnection reply message.
422 * @private_data_len: Size of the private data buffer, in bytes.
423 *
424 * If the cm_id is in the correct state, the CM will transition the connection
425 * to the timewait state, even if an error occurs sending the DREP message.
426 */
427int ib_send_cm_drep(struct ib_cm_id *cm_id,
428 const void *private_data,
429 u8 private_data_len);
430
431/**
432 * ib_cm_establish - Forces a connection state to established.
433 * @cm_id: Connection identifier to transition to established.
434 *
435 * This routine should be invoked by users who receive messages on a
436 * connected QP before an RTU has been received.
437 */
438int ib_cm_establish(struct ib_cm_id *cm_id);
439
440/**
441 * ib_send_cm_rej - Sends a connection rejection message to the
442 * remote node.
443 * @cm_id: Connection identifier associated with the connection being
444 * rejected.
445 * @reason: Reason for the connection request rejection.
446 * @ari: Optional additional rejection information.
447 * @ari_length: Size of the additional rejection information, in bytes.
448 * @private_data: Optional user-defined private data sent with the
449 * rejection message.
450 * @private_data_len: Size of the private data buffer, in bytes.
451 */
452int ib_send_cm_rej(struct ib_cm_id *cm_id,
453 enum ib_cm_rej_reason reason,
454 void *ari,
455 u8 ari_length,
456 const void *private_data,
457 u8 private_data_len);
458
459/**
460 * ib_send_cm_mra - Sends a message receipt acknowledgement to a connection
461 * message.
462 * @cm_id: Connection identifier associated with the connection message.
463 * @service_timeout: The maximum time required for the sender to reply to
 464 * the connection message.
465 * @private_data: Optional user-defined private data sent with the
466 * message receipt acknowledgement.
467 * @private_data_len: Size of the private data buffer, in bytes.
468 */
469int ib_send_cm_mra(struct ib_cm_id *cm_id,
470 u8 service_timeout,
471 const void *private_data,
472 u8 private_data_len);
473
474/**
475 * ib_send_cm_lap - Sends a load alternate path request.
476 * @cm_id: Connection identifier associated with the load alternate path
477 * message.
478 * @alternate_path: A path record that identifies the alternate path to
479 * load.
480 * @private_data: Optional user-defined private data sent with the
481 * load alternate path message.
482 * @private_data_len: Size of the private data buffer, in bytes.
483 */
484int ib_send_cm_lap(struct ib_cm_id *cm_id,
485 struct ib_sa_path_rec *alternate_path,
486 const void *private_data,
487 u8 private_data_len);
488
489/**
490 * ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
491 * to a specified QP state.
492 * @cm_id: Communication identifier associated with the QP attributes to
493 * initialize.
494 * @qp_attr: On input, specifies the desired QP state. On output, the
495 * mandatory and desired optional attributes will be set in order to
496 * modify the QP to the specified state.
497 * @qp_attr_mask: The QP attribute mask that may be used to transition the
498 * QP to the specified state.
499 *
500 * Users must set the @qp_attr->qp_state to the desired QP state. This call
501 * will set all required attributes for the given transition, along with
502 * known optional attributes. Users may override the attributes returned from
503 * this call before calling ib_modify_qp.
504 */
505int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
506 struct ib_qp_attr *qp_attr,
507 int *qp_attr_mask);
508
509/**
510 * ib_send_cm_apr - Sends an alternate path response message in response to
511 * a load alternate path request.
512 * @cm_id: Connection identifier associated with the alternate path response.
513 * @status: Reply status sent with the alternate path response.
514 * @info: Optional additional information sent with the alternate path
515 * response.
516 * @info_length: Size of the additional information, in bytes.
517 * @private_data: Optional user-defined private data sent with the
518 * alternate path response message.
519 * @private_data_len: Size of the private data buffer, in bytes.
520 */
521int ib_send_cm_apr(struct ib_cm_id *cm_id,
522 enum ib_cm_apr_status status,
523 void *info,
524 u8 info_length,
525 const void *private_data,
526 u8 private_data_len);
527
528struct ib_cm_sidr_req_param {
529 struct ib_sa_path_rec *path;
530 __be64 service_id;
531 int timeout_ms;
532 const void *private_data;
533 u8 private_data_len;
534 u8 max_cm_retries;
535 u16 pkey;
536};
537
538/**
539 * ib_send_cm_sidr_req - Sends a service ID resolution request to the
540 * remote node.
541 * @cm_id: Communication identifier that will be associated with the
542 * service ID resolution request.
543 * @param: Service ID resolution request information.
544 */
545int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
546 struct ib_cm_sidr_req_param *param);
547
548struct ib_cm_sidr_rep_param {
549 u32 qp_num;
550 u32 qkey;
551 enum ib_cm_sidr_status status;
552 const void *info;
553 u8 info_length;
554 const void *private_data;
555 u8 private_data_len;
556};
557
558/**
 559 * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
560 * remote node.
561 * @cm_id: Communication identifier associated with the received service ID
562 * resolution request.
563 * @param: Service ID resolution reply information.
564 */
565int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
566 struct ib_cm_sidr_rep_param *param);
567
568#endif /* IB_CM_H */
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
new file mode 100644
index 000000000000..86b7e93f198b
--- /dev/null
+++ b/include/rdma/ib_fmr_pool.h
@@ -0,0 +1,93 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_fmr_pool.h 2730 2005-06-28 16:43:03Z sean.hefty $
34 */
35
36#if !defined(IB_FMR_POOL_H)
37#define IB_FMR_POOL_H
38
39#include <rdma/ib_verbs.h>
40
41struct ib_fmr_pool;
42
43/**
44 * struct ib_fmr_pool_param - Parameters for creating FMR pool
45 * @max_pages_per_fmr:Maximum number of pages per map request.
46 * @access:Access flags for FMRs in pool.
47 * @pool_size:Number of FMRs to allocate for pool.
48 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
49 * FMRs are present.
50 * @flush_function:Callback called when unmapped FMRs are flushed and
51 * more FMRs are possibly available for mapping
52 * @flush_arg:Context passed to user's flush function.
53 * @cache:If set, FMRs may be reused after unmapping for identical map
54 * requests.
55 */
56struct ib_fmr_pool_param {
57 int max_pages_per_fmr;
58 enum ib_access_flags access;
59 int pool_size;
60 int dirty_watermark;
61 void (*flush_function)(struct ib_fmr_pool *pool,
62 void * arg);
63 void *flush_arg;
64 unsigned cache:1;
65};
66
67struct ib_pool_fmr {
68 struct ib_fmr *fmr;
69 struct ib_fmr_pool *pool;
70 struct list_head list;
71 struct hlist_node cache_node;
72 int ref_count;
73 int remap_count;
74 u64 io_virtual_address;
75 int page_list_len;
76 u64 page_list[0];
77};
78
79struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
80 struct ib_fmr_pool_param *params);
81
82void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
83
84int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
85
86struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
87 u64 *page_list,
88 int list_len,
89 u64 *io_virtual_address);
90
91int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
92
93#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
new file mode 100644
index 000000000000..fc6b1c18ffc6
--- /dev/null
+++ b/include/rdma/ib_mad.h
@@ -0,0 +1,579 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
37 */
38
39#if !defined( IB_MAD_H )
40#define IB_MAD_H
41
42#include <linux/pci.h>
43
44#include <rdma/ib_verbs.h>
45
46/* Management base version */
47#define IB_MGMT_BASE_VERSION 1
48
49/* Management classes */
50#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
51#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
52#define IB_MGMT_CLASS_SUBN_ADM 0x03
53#define IB_MGMT_CLASS_PERF_MGMT 0x04
54#define IB_MGMT_CLASS_BM 0x05
55#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
56#define IB_MGMT_CLASS_CM 0x07
57#define IB_MGMT_CLASS_SNMP 0x08
58#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
59#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F
60
61#define IB_OPENIB_OUI (0x001405)
62
63/* Management methods */
64#define IB_MGMT_METHOD_GET 0x01
65#define IB_MGMT_METHOD_SET 0x02
66#define IB_MGMT_METHOD_GET_RESP 0x81
67#define IB_MGMT_METHOD_SEND 0x03
68#define IB_MGMT_METHOD_TRAP 0x05
69#define IB_MGMT_METHOD_REPORT 0x06
70#define IB_MGMT_METHOD_REPORT_RESP 0x86
71#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
72
73#define IB_MGMT_METHOD_RESP 0x80
74
75#define IB_MGMT_MAX_METHODS 128
76
77/* RMPP information */
78#define IB_MGMT_RMPP_VERSION 1
79
80#define IB_MGMT_RMPP_TYPE_DATA 1
81#define IB_MGMT_RMPP_TYPE_ACK 2
82#define IB_MGMT_RMPP_TYPE_STOP 3
83#define IB_MGMT_RMPP_TYPE_ABORT 4
84
85#define IB_MGMT_RMPP_FLAG_ACTIVE 1
86#define IB_MGMT_RMPP_FLAG_FIRST (1<<1)
87#define IB_MGMT_RMPP_FLAG_LAST (1<<2)
88
89#define IB_MGMT_RMPP_NO_RESPTIME 0x1F
90
91#define IB_MGMT_RMPP_STATUS_SUCCESS 0
92#define IB_MGMT_RMPP_STATUS_RESX 1
93#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118
94#define IB_MGMT_RMPP_STATUS_T2L 118
95#define IB_MGMT_RMPP_STATUS_BAD_LEN 119
96#define IB_MGMT_RMPP_STATUS_BAD_SEG 120
97#define IB_MGMT_RMPP_STATUS_BADT 121
98#define IB_MGMT_RMPP_STATUS_W2S 122
99#define IB_MGMT_RMPP_STATUS_S2B 123
100#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124
101#define IB_MGMT_RMPP_STATUS_UNV 125
102#define IB_MGMT_RMPP_STATUS_TMR 126
103#define IB_MGMT_RMPP_STATUS_UNSPEC 127
104#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127
105
106#define IB_QP0 0
107#define IB_QP1 __constant_htonl(1)
108#define IB_QP1_QKEY 0x80010000
109#define IB_QP_SET_QKEY 0x80000000
110
/**
 * struct ib_mad_hdr - Common MAD header (wire format, 24 bytes)
 * @base_version: Version of the MAD base format (IB_MGMT_BASE_VERSION).
 * @mgmt_class: Management class (IB_MGMT_CLASS_*).
 * @class_version: Version of the management class format.
 * @method: Method being performed (IB_MGMT_METHOD_*).
 * @status: Operation status, network byte order.
 * @class_specific: Meaning defined by the management class.
 * @tid: Transaction identifier.
 * @attr_id: Attribute being operated on.
 * @resv: Reserved.
 * @attr_mod: Attribute modifier.
 */
struct ib_mad_hdr {
	u8	base_version;
	u8	mgmt_class;
	u8	class_version;
	u8	method;
	__be16	status;
	__be16	class_specific;
	__be64	tid;
	__be16	attr_id;
	__be16	resv;
	__be32	attr_mod;
};
123
/**
 * struct ib_rmpp_hdr - RMPP header, follows the common MAD header (12 bytes)
 * @rmpp_version: RMPP protocol version (IB_MGMT_RMPP_VERSION).
 * @rmpp_type: RMPP packet type (IB_MGMT_RMPP_TYPE_*).
 * @rmpp_rtime_flags: Response time in the upper 5 bits and RMPP flags
 *     (IB_MGMT_RMPP_FLAG_*) in the lower 3 bits.  Use the
 *     ib_get/set_rmpp_resptime() and ib_get/set_rmpp_flags() accessors
 *     below rather than manipulating the bits directly.
 * @rmpp_status: RMPP status (IB_MGMT_RMPP_STATUS_*).
 * @seg_num: Segment number, network byte order.
 * @paylen_newwin: Payload length or new window, network byte order.
 */
struct ib_rmpp_hdr {
	u8	rmpp_version;
	u8	rmpp_type;
	u8	rmpp_rtime_flags;
	u8	rmpp_status;
	__be32	seg_num;
	__be32	paylen_newwin;
};
132
133typedef u64 __bitwise ib_sa_comp_mask;
134
135#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
136
/*
 * ib_sa_hdr and ib_sa_mad structures must be packed because they have
 * 64-bit fields that are only 32-bit aligned.  64-bit architectures will
 * lay them out wrong otherwise.  (And unfortunately they are sent on
 * the wire so we can't change the layout.)
 */
struct ib_sa_hdr {
	__be64			sm_key;
	__be16			attr_offset;
	__be16			reserved;
	ib_sa_comp_mask		comp_mask;
} __attribute__ ((packed));
149
/* A complete MAD is 256 bytes: the 24-byte common header plus data. */
struct ib_mad {
	struct ib_mad_hdr	mad_hdr;
	u8			data[232];
};

/* RMPP MAD: common header + 12-byte RMPP header + 220 data bytes = 256. */
struct ib_rmpp_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			data[220];
};

/*
 * SA MAD: common + RMPP + 20-byte SA headers + 200 data bytes = 256.
 * Packed because ib_sa_hdr leaves 64-bit fields 32-bit aligned (see the
 * comment above struct ib_sa_hdr).
 */
struct ib_sa_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	struct ib_sa_hdr	sa_hdr;
	u8			data[200];
} __attribute__ ((packed));

/* Vendor-class MAD: @oui carries the IEEE OUI identifying the vendor. */
struct ib_vendor_mad {
	struct ib_mad_hdr	mad_hdr;
	struct ib_rmpp_hdr	rmpp_hdr;
	u8			reserved;
	u8			oui[3];
	u8			data[216];
};
175
/**
 * ib_mad_send_buf - MAD data buffer and work request for sends.
 * @mad: References an allocated MAD data buffer.  The size of the data
 *   buffer is specified in the @send_wr.length field.
 * @mapping: DMA mapping information.
 * @mad_agent: MAD agent that allocated the buffer.
 * @context: User-controlled context fields.
 * @send_wr: An initialized work request structure used when sending the MAD.
 *   The wr_id field of the work request is initialized to reference this
 *   data structure.
 * @sge: A scatter-gather list referenced by the work request.
 *
 * Users are responsible for initializing the MAD buffer itself, with the
 * exception of specifying the payload length field in any RMPP MAD.
 */
struct ib_mad_send_buf {
	struct ib_mad		*mad;
	/* No semicolon: DECLARE_PCI_UNMAP_ADDR expands to a complete member
	 * declaration (or to nothing on platforms where it is unneeded). */
	DECLARE_PCI_UNMAP_ADDR(mapping)
	struct ib_mad_agent	*mad_agent;
	void			*context[2];
	struct ib_send_wr	send_wr;
	struct ib_sge		sge;
};
199
200/**
201 * ib_get_rmpp_resptime - Returns the RMPP response time.
202 * @rmpp_hdr: An RMPP header.
203 */
204static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
205{
206 return rmpp_hdr->rmpp_rtime_flags >> 3;
207}
208
209/**
210 * ib_get_rmpp_flags - Returns the RMPP flags.
211 * @rmpp_hdr: An RMPP header.
212 */
213static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
214{
215 return rmpp_hdr->rmpp_rtime_flags & 0x7;
216}
217
218/**
219 * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
220 * @rmpp_hdr: An RMPP header.
221 * @rtime: The response time to set.
222 */
223static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
224{
225 rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
226}
227
228/**
229 * ib_set_rmpp_flags - Sets the flags in an RMPP header.
230 * @rmpp_hdr: An RMPP header.
231 * @flags: The flags to set.
232 */
233static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
234{
235 rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
236 (flags & 0x7);
237}
238
/* Opaque to clients; defined below / in the MAD layer implementation. */
struct ib_mad_agent;
struct ib_mad_send_wc;
struct ib_mad_recv_wc;

/**
 * ib_mad_send_handler - Callback handler for a sent MAD.
 * @mad_agent: MAD agent that sent the MAD.
 * @mad_send_wc: Send work completion information on the sent MAD.
 */
typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
 * @mad_agent: MAD agent that snooped the MAD.
 * @send_wr: Work request information on the sent MAD.
 * @mad_send_wc: Work completion information on the sent MAD.  Valid
 *   only for snooping that occurs on a send completion.
 *
 * Clients snooping MADs should not modify data referenced by the @send_wr
 * or @mad_send_wc.
 */
typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
				     struct ib_send_wr *send_wr,
				     struct ib_mad_send_wc *mad_send_wc);

/**
 * ib_mad_recv_handler - Callback handler for a received MAD.
 * @mad_agent: MAD agent requesting the received MAD.
 * @mad_recv_wc: Received work completion information on the received MAD.
 *
 * MADs received in response to a send request operation will be handed to
 * the user after the send operation completes.  All data buffers given
 * to registered agents through this routine are owned by the receiving
 * client, except for snooping agents.  Clients snooping MADs should not
 * modify the data referenced by @mad_recv_wc.
 */
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
				    struct ib_mad_recv_wc *mad_recv_wc);
278
/**
 * ib_mad_agent - Used to track MAD registration with the access layer.
 * @device: Reference to device registration is on.
 * @qp: Reference to QP used for sending and receiving MADs.
 * @mr: Memory region for system memory usable for DMA.
 * @recv_handler: Callback handler for a received MAD.
 * @send_handler: Callback handler for a sent MAD.
 * @snoop_handler: Callback handler for snooped sent MADs.
 * @context: User-specified context associated with this registration.
 * @hi_tid: Access layer assigned transaction ID for this client.
 *   Unsolicited MADs sent by this client will have the upper 32-bits
 *   of their TID set to this value.
 * @port_num: Port number on which the QP is registered.
 * @rmpp_version: If set, indicates the RMPP version used by this agent.
 */
struct ib_mad_agent {
	struct ib_device	*device;
	struct ib_qp		*qp;
	struct ib_mr		*mr;
	ib_mad_recv_handler	recv_handler;
	ib_mad_send_handler	send_handler;
	ib_mad_snoop_handler	snoop_handler;
	void			*context;
	u32			hi_tid;
	u8			port_num;
	u8			rmpp_version;
};
306
/**
 * ib_mad_send_wc - MAD send completion information.
 * @wr_id: Work request identifier associated with the send MAD request.
 * @status: Completion status.
 * @vendor_err: Optional vendor error information returned with a failed
 *   request.
 */
struct ib_mad_send_wc {
	u64			wr_id;
	enum ib_wc_status	status;
	u32			vendor_err;
};
319
/**
 * ib_mad_recv_buf - Received MAD buffer information.
 * @list: Reference to the next data buffer for a received RMPP MAD.
 * @grh: References a data buffer containing the global route header.
 *   The data referenced by this buffer is only valid if the GRH is
 *   valid.
 * @mad: References the start of the received MAD.
 */
struct ib_mad_recv_buf {
	struct list_head	list;
	struct ib_grh		*grh;
	struct ib_mad		*mad;
};
333
/**
 * ib_mad_recv_wc - Received MAD information.
 * @wc: Completion information for the received data.
 * @recv_buf: Specifies the location of the received data buffer(s).
 * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
 * @mad_len: The length of the received MAD, without duplicated headers.
 *
 * For a received response, the wr_id field of the wc is set to the wr_id
 * of the corresponding send request.
 */
struct ib_mad_recv_wc {
	struct ib_wc		*wc;
	struct ib_mad_recv_buf	recv_buf;
	struct list_head	rmpp_list;
	int			mad_len;
};
350
/**
 * ib_mad_reg_req - MAD registration request.
 * @mgmt_class: Indicates which management class of MADs should be
 *   received by the caller.  This field is only required if the user
 *   wishes to receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version: Indicates which version of MADs for the given
 *   management class to receive.
 * @oui: Indicates the IEEE OUI when @mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f.  Otherwise not used.
 * @method_mask: The caller will receive unsolicited MADs for any method
 *   whose corresponding bit in @method_mask is set.
 */
struct ib_mad_reg_req {
	u8	mgmt_class;
	u8	mgmt_class_version;
	u8	oui[3];
	DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
};
369
370/**
371 * ib_register_mad_agent - Register to send/receive MADs.
372 * @device: The device to register with.
373 * @port_num: The port on the specified device to use.
374 * @qp_type: Specifies which QP to access. Must be either
375 * IB_QPT_SMI or IB_QPT_GSI.
376 * @mad_reg_req: Specifies which unsolicited MADs should be received
377 * by the caller. This parameter may be NULL if the caller only
378 * wishes to receive solicited responses.
379 * @rmpp_version: If set, indicates that the client will send
380 * and receive MADs that contain the RMPP header for the given version.
381 * If set to 0, indicates that RMPP is not used by this client.
382 * @send_handler: The completion callback routine invoked after a send
383 * request has completed.
384 * @recv_handler: The completion callback routine invoked for a received
385 * MAD.
386 * @context: User specified context associated with the registration.
387 */
388struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
389 u8 port_num,
390 enum ib_qp_type qp_type,
391 struct ib_mad_reg_req *mad_reg_req,
392 u8 rmpp_version,
393 ib_mad_send_handler send_handler,
394 ib_mad_recv_handler recv_handler,
395 void *context);
396
397enum ib_mad_snoop_flags {
398 /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
399 /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
400 IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
401 /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
402 IB_MAD_SNOOP_RECVS = (1<<4)
403 /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
404 /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
405};
406
407/**
408 * ib_register_mad_snoop - Register to snoop sent and received MADs.
409 * @device: The device to register with.
410 * @port_num: The port on the specified device to use.
411 * @qp_type: Specifies which QP traffic to snoop. Must be either
412 * IB_QPT_SMI or IB_QPT_GSI.
413 * @mad_snoop_flags: Specifies information where snooping occurs.
414 * @send_handler: The callback routine invoked for a snooped send.
415 * @recv_handler: The callback routine invoked for a snooped receive.
416 * @context: User specified context associated with the registration.
417 */
418struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
419 u8 port_num,
420 enum ib_qp_type qp_type,
421 int mad_snoop_flags,
422 ib_mad_snoop_handler snoop_handler,
423 ib_mad_recv_handler recv_handler,
424 void *context);
425
426/**
427 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
428 * @mad_agent: Corresponding MAD registration request to deregister.
429 *
430 * After invoking this routine, MAD services are no longer usable by the
431 * client on the associated QP.
432 */
433int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
434
435/**
436 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
437 * with the registered client.
438 * @mad_agent: Specifies the associated registration to post the send to.
439 * @send_wr: Specifies the information needed to send the MAD(s).
440 * @bad_send_wr: Specifies the MAD on which an error was encountered.
441 *
442 * Sent MADs are not guaranteed to complete in the order that they were posted.
443 *
444 * If the MAD requires RMPP, the data buffer should contain a single copy
445 * of the common MAD, RMPP, and class specific headers, followed by the class
446 * defined data. If the class defined data would not divide evenly into
447 * RMPP segments, then space must be allocated at the end of the referenced
448 * buffer for any required padding. To indicate the amount of class defined
449 * data being transferred, the paylen_newwin field in the RMPP header should
450 * be set to the size of the class specific header plus the amount of class
451 * defined data being transferred. The paylen_newwin field should be
452 * specified in network-byte order.
453 */
454int ib_post_send_mad(struct ib_mad_agent *mad_agent,
455 struct ib_send_wr *send_wr,
456 struct ib_send_wr **bad_send_wr);
457
458/**
459 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
460 * @mad_recv_wc: Work completion information for a received MAD.
461 * @buf: User-provided data buffer to receive the coalesced buffers. The
462 * referenced buffer should be at least the size of the mad_len specified
463 * by @mad_recv_wc.
464 *
465 * This call copies a chain of received MAD segments into a single data buffer,
466 * removing duplicated headers.
467 */
468void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
469
470/**
471 * ib_free_recv_mad - Returns data buffers used to receive a MAD.
472 * @mad_recv_wc: Work completion information for a received MAD.
473 *
474 * Clients receiving MADs through their ib_mad_recv_handler must call this
475 * routine to return the work completion buffers to the access layer.
476 */
477void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
478
479/**
480 * ib_cancel_mad - Cancels an outstanding send MAD operation.
481 * @mad_agent: Specifies the registration associated with sent MAD.
482 * @wr_id: Indicates the work request identifier of the MAD to cancel.
483 *
484 * MADs will be returned to the user through the corresponding
485 * ib_mad_send_handler.
486 */
487void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id);
488
489/**
490 * ib_modify_mad - Modifies an outstanding send MAD operation.
491 * @mad_agent: Specifies the registration associated with sent MAD.
492 * @wr_id: Indicates the work request identifier of the MAD to modify.
493 * @timeout_ms: New timeout value for sent MAD.
494 *
495 * This call will reset the timeout value for a sent MAD to the specified
496 * value.
497 */
498int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms);
499
500/**
501 * ib_redirect_mad_qp - Registers a QP for MAD services.
502 * @qp: Reference to a QP that requires MAD services.
503 * @rmpp_version: If set, indicates that the client will send
504 * and receive MADs that contain the RMPP header for the given version.
505 * If set to 0, indicates that RMPP is not used by this client.
506 * @send_handler: The completion callback routine invoked after a send
507 * request has completed.
508 * @recv_handler: The completion callback routine invoked for a received
509 * MAD.
510 * @context: User specified context associated with the registration.
511 *
512 * Use of this call allows clients to use MAD services, such as RMPP,
513 * on user-owned QPs. After calling this routine, users may send
 514 * MADs on the specified QP by calling ib_post_send_mad.
515 */
516struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
517 u8 rmpp_version,
518 ib_mad_send_handler send_handler,
519 ib_mad_recv_handler recv_handler,
520 void *context);
521
522/**
523 * ib_process_mad_wc - Processes a work completion associated with a
524 * MAD sent or received on a redirected QP.
525 * @mad_agent: Specifies the registered MAD service using the redirected QP.
526 * @wc: References a work completion associated with a sent or received
527 * MAD segment.
528 *
529 * This routine is used to complete or continue processing on a MAD request.
530 * If the work completion is associated with a send operation, calling
531 * this routine is required to continue an RMPP transfer or to wait for a
532 * corresponding response, if it is a request. If the work completion is
533 * associated with a receive operation, calling this routine is required to
534 * process an inbound or outbound RMPP transfer, or to match a response MAD
535 * with its corresponding request.
536 */
537int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
538 struct ib_wc *wc);
539
540/**
541 * ib_create_send_mad - Allocate and initialize a data buffer and work request
542 * for sending a MAD.
543 * @mad_agent: Specifies the registered MAD service to associate with the MAD.
544 * @remote_qpn: Specifies the QPN of the receiving node.
545 * @pkey_index: Specifies which PKey the MAD will be sent using. This field
546 * is valid only if the remote_qpn is QP 1.
547 * @ah: References the address handle used to transfer to the remote node.
548 * @rmpp_active: Indicates if the send will enable RMPP.
549 * @hdr_len: Indicates the size of the data header of the MAD. This length
550 * should include the common MAD header, RMPP header, plus any class
551 * specific header.
552 * @data_len: Indicates the size of any user-transferred data. The call will
553 * automatically adjust the allocated buffer size to account for any
554 * additional padding that may be necessary.
555 * @gfp_mask: GFP mask used for the memory allocation.
556 *
557 * This is a helper routine that may be used to allocate a MAD. Users are
558 * not required to allocate outbound MADs using this call. The returned
559 * MAD send buffer will reference a data buffer usable for sending a MAD, along
560 * with an initialized work request structure. Users may modify the returned
561 * MAD data buffer or work request before posting the send.
562 *
563 * The returned data buffer will be cleared. Users are responsible for
564 * initializing the common MAD and any class specific headers. If @rmpp_active
565 * is set, the RMPP header will be initialized for sending.
566 */
567struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
568 u32 remote_qpn, u16 pkey_index,
569 struct ib_ah *ah, int rmpp_active,
570 int hdr_len, int data_len,
571 unsigned int __nocast gfp_mask);
572
573/**
574 * ib_free_send_mad - Returns data buffers used to send a MAD.
575 * @send_buf: Previously allocated send data buffer.
576 */
577void ib_free_send_mad(struct ib_mad_send_buf *send_buf);
578
579#endif /* IB_MAD_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
new file mode 100644
index 000000000000..f926020d6331
--- /dev/null
+++ b/include/rdma/ib_pack.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef IB_PACK_H
36#define IB_PACK_H
37
38#include <rdma/ib_verbs.h>
39
/* Byte lengths of the IBA packet headers handled by this file. */
enum {
	IB_LRH_BYTES  = 8,
	IB_GRH_BYTES  = 40,
	IB_BTH_BYTES  = 12,
	IB_DETH_BYTES = 8
};
46
/*
 * Describes one field of a packed IBA structure: where it lives in the
 * unpacked host structure (byte offset and size) and where it lives in
 * the packed wire buffer (offset in 32-bit words plus a bit offset and
 * width).  Arrays of these drive ib_pack() and ib_unpack() below.
 */
struct ib_field {
	size_t struct_offset_bytes;
	size_t struct_size_bytes;
	int    offset_words;
	int    offset_bits;
	int    size_bits;
	char  *field_name;
};
55
/* Marks an entry in an ib_field table as a reserved (unnamed) field. */
#define RESERVED \
	.field_name          = "reserved"

/*
 * This macro cleans up the definitions of constants for BTH opcodes.
 * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
 * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
 * the correct value.
 *
 * In short, user code should use the constants defined using the
 * macro rather than worrying about adding together other constants.
 */
#define IB_OPCODE(transport, op) \
	IB_OPCODE_ ## transport ## _ ## op = \
		IB_OPCODE_ ## transport + IB_OPCODE_ ## op
71
72enum {
73 /* transport types -- just used to define real constants */
74 IB_OPCODE_RC = 0x00,
75 IB_OPCODE_UC = 0x20,
76 IB_OPCODE_RD = 0x40,
77 IB_OPCODE_UD = 0x60,
78
79 /* operations -- just used to define real constants */
80 IB_OPCODE_SEND_FIRST = 0x00,
81 IB_OPCODE_SEND_MIDDLE = 0x01,
82 IB_OPCODE_SEND_LAST = 0x02,
83 IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
84 IB_OPCODE_SEND_ONLY = 0x04,
85 IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
86 IB_OPCODE_RDMA_WRITE_FIRST = 0x06,
87 IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
88 IB_OPCODE_RDMA_WRITE_LAST = 0x08,
89 IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
90 IB_OPCODE_RDMA_WRITE_ONLY = 0x0a,
91 IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
92 IB_OPCODE_RDMA_READ_REQUEST = 0x0c,
93 IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
94 IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
95 IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
96 IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
97 IB_OPCODE_ACKNOWLEDGE = 0x11,
98 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
99 IB_OPCODE_COMPARE_SWAP = 0x13,
100 IB_OPCODE_FETCH_ADD = 0x14,
101
 102 /* real constants follow -- see the comment above the IB_OPCODE()
 103    macro for more details */
104
105 /* RC */
106 IB_OPCODE(RC, SEND_FIRST),
107 IB_OPCODE(RC, SEND_MIDDLE),
108 IB_OPCODE(RC, SEND_LAST),
109 IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
110 IB_OPCODE(RC, SEND_ONLY),
111 IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
112 IB_OPCODE(RC, RDMA_WRITE_FIRST),
113 IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
114 IB_OPCODE(RC, RDMA_WRITE_LAST),
115 IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
116 IB_OPCODE(RC, RDMA_WRITE_ONLY),
117 IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
118 IB_OPCODE(RC, RDMA_READ_REQUEST),
119 IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
120 IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
121 IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
122 IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
123 IB_OPCODE(RC, ACKNOWLEDGE),
124 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
125 IB_OPCODE(RC, COMPARE_SWAP),
126 IB_OPCODE(RC, FETCH_ADD),
127
128 /* UC */
129 IB_OPCODE(UC, SEND_FIRST),
130 IB_OPCODE(UC, SEND_MIDDLE),
131 IB_OPCODE(UC, SEND_LAST),
132 IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
133 IB_OPCODE(UC, SEND_ONLY),
134 IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
135 IB_OPCODE(UC, RDMA_WRITE_FIRST),
136 IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
137 IB_OPCODE(UC, RDMA_WRITE_LAST),
138 IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
139 IB_OPCODE(UC, RDMA_WRITE_ONLY),
140 IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
141
142 /* RD */
143 IB_OPCODE(RD, SEND_FIRST),
144 IB_OPCODE(RD, SEND_MIDDLE),
145 IB_OPCODE(RD, SEND_LAST),
146 IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
147 IB_OPCODE(RD, SEND_ONLY),
148 IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
149 IB_OPCODE(RD, RDMA_WRITE_FIRST),
150 IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
151 IB_OPCODE(RD, RDMA_WRITE_LAST),
152 IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
153 IB_OPCODE(RD, RDMA_WRITE_ONLY),
154 IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
155 IB_OPCODE(RD, RDMA_READ_REQUEST),
156 IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
157 IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
158 IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
159 IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
160 IB_OPCODE(RD, ACKNOWLEDGE),
161 IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
162 IB_OPCODE(RD, COMPARE_SWAP),
163 IB_OPCODE(RD, FETCH_ADD),
164
165 /* UD */
166 IB_OPCODE(UD, SEND_ONLY),
167 IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
168};
169
/* Values of the link_next_header (LNH) field in the LRH. */
enum {
	IB_LNH_RAW        = 0,
	IB_LNH_IP         = 1,
	IB_LNH_IBA_LOCAL  = 2,
	IB_LNH_IBA_GLOBAL = 3
};
176
/* Unpacked form of the Local Route Header (LRH). */
struct ib_unpacked_lrh {
	u8	virtual_lane;
	u8	link_version;
	u8	service_level;
	u8	link_next_header;	/* one of the IB_LNH_* values above */
	__be16	destination_lid;
	__be16	packet_length;
	__be16	source_lid;
};
186
/* Unpacked form of the Global Route Header (GRH). */
struct ib_unpacked_grh {
	u8		ip_version;
	u8		traffic_class;
	__be32		flow_label;
	__be16		payload_length;
	u8		next_header;
	u8		hop_limit;
	union ib_gid	source_gid;
	union ib_gid	destination_gid;
};
197
/* Unpacked form of the Base Transport Header (BTH). */
struct ib_unpacked_bth {
	u8	opcode;		/* one of the IB_OPCODE_* values above */
	u8	solicited_event;
	u8	mig_req;
	u8	pad_count;
	u8	transport_header_version;
	__be16	pkey;
	__be32	destination_qpn;
	u8	ack_req;
	__be32	psn;
};
209
/* Unpacked form of the Datagram Extended Transport Header (DETH). */
struct ib_unpacked_deth {
	__be32	qkey;
	__be32	source_qpn;
};
214
/*
 * Unpacked representation of a full UD packet header: LRH, optional GRH,
 * BTH and DETH, plus optional immediate data.  The *_present flags record
 * which optional parts are valid.
 */
struct ib_ud_header {
	struct ib_unpacked_lrh  lrh;
	int                     grh_present;
	struct ib_unpacked_grh  grh;
	struct ib_unpacked_bth  bth;
	struct ib_unpacked_deth deth;
	int                     immediate_present;
	__be32                  immediate_data;
};
224
/**
 * ib_pack - Pack a structure into a buffer
 * @desc: Array of structure field descriptions
 * @desc_len: Number of entries in @desc
 * @structure: Structure to pack from
 * @buf: Buffer to pack into
 */
void ib_pack(const struct ib_field        *desc,
	     int                           desc_len,
	     void                         *structure,
	     void                         *buf);

/**
 * ib_unpack - Unpack a buffer into a structure
 * @desc: Array of structure field descriptions
 * @desc_len: Number of entries in @desc
 * @buf: Buffer to unpack from
 * @structure: Structure to unpack into
 */
void ib_unpack(const struct ib_field        *desc,
	       int                           desc_len,
	       void                         *buf,
	       void                         *structure);

/**
 * ib_ud_header_init - Initialize a UD header structure
 * @payload_bytes: Length of packet payload
 * @grh_present: Specify if a GRH is present
 * @header: Structure to initialize
 */
void ib_ud_header_init(int     		    payload_bytes,
		       int    		    grh_present,
		       struct ib_ud_header *header);

/**
 * ib_ud_header_pack - Pack a UD header struct into wire format
 * @header: UD header struct
 * @buf: Buffer to pack into
 */
int ib_ud_header_pack(struct ib_ud_header *header,
		      void                *buf);

/**
 * ib_ud_header_unpack - Unpack a UD header struct from wire format
 * @buf: Buffer to unpack from
 * @header: UD header struct
 */
int ib_ud_header_unpack(void                *buf,
			struct ib_ud_header *header);
245#endif /* IB_PACK_H */
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
new file mode 100644
index 000000000000..c022edfc49da
--- /dev/null
+++ b/include/rdma/ib_sa.h
@@ -0,0 +1,373 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_sa.h 2811 2005-07-06 18:11:43Z halr $
34 */
35
36#ifndef IB_SA_H
37#define IB_SA_H
38
39#include <linux/compiler.h>
40
41#include <rdma/ib_verbs.h>
42#include <rdma/ib_mad.h>
43
enum {
	IB_SA_CLASS_VERSION		= 2,	/* IB spec version 1.1/1.2 */

	/* SA-class methods beyond the common IB_MGMT_METHOD_* set */
	IB_SA_METHOD_GET_TABLE		= 0x12,
	IB_SA_METHOD_GET_TABLE_RESP	= 0x92,
	IB_SA_METHOD_DELETE		= 0x15
};

/* How the SA should interpret a _SELECTOR-qualified attribute value */
enum ib_sa_selector {
	IB_SA_GTE  = 0,
	IB_SA_LTE  = 1,
	IB_SA_EQ   = 2,
	/*
	 * The meaning of "best" depends on the attribute: for
	 * example, for MTU best will return the largest available
	 * MTU, while for packet life time, best will return the
	 * smallest available life time.
	 */
	IB_SA_BEST = 3
};
64
/*
 * SA wire encodings of the link rate.  Note that the encodings are
 * NOT ordered by speed (e.g. 10 Gb/sec encodes as 3 but 5 Gb/sec as 5).
 */
enum ib_sa_rate {
	IB_SA_RATE_2_5_GBPS = 2,
	IB_SA_RATE_5_GBPS   = 5,
	IB_SA_RATE_10_GBPS  = 3,
	IB_SA_RATE_20_GBPS  = 6,
	IB_SA_RATE_30_GBPS  = 4,
	IB_SA_RATE_40_GBPS  = 7,
	IB_SA_RATE_60_GBPS  = 8,
	IB_SA_RATE_80_GBPS  = 9,
	IB_SA_RATE_120_GBPS = 10
};

/*
 * ib_sa_rate_enum_to_int - convert an SA rate encoding to the link
 * speed expressed as a multiple of 2.5 Gb/sec (e.g. IB_SA_RATE_10_GBPS
 * maps to 4), or -1 for an unrecognized encoding.
 */
static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
{
	/* Indexed by wire encoding; slots left at 0 are invalid codes. */
	static const int mult[IB_SA_RATE_120_GBPS + 1] = {
		[IB_SA_RATE_2_5_GBPS] = 1,
		[IB_SA_RATE_5_GBPS]   = 2,
		[IB_SA_RATE_10_GBPS]  = 4,
		[IB_SA_RATE_20_GBPS]  = 8,
		[IB_SA_RATE_30_GBPS]  = 12,
		[IB_SA_RATE_40_GBPS]  = 16,
		[IB_SA_RATE_60_GBPS]  = 24,
		[IB_SA_RATE_80_GBPS]  = 32,
		[IB_SA_RATE_120_GBPS] = 48
	};

	if ((unsigned int) rate >= sizeof(mult) / sizeof(mult[0]) ||
	    !mult[rate])
		return -1;
	return mult[rate];
}
92
/*
 * Structures for SA records are named "struct ib_sa_xxx_rec."  No
 * attempt is made to pack structures to match the physical layout of
 * SA records in SA MADs; all packing and unpacking is handled by the
 * SA query code.
 *
 * For a record with structure ib_sa_xxx_rec, the naming convention
 * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we
 * never use different abbreviations or otherwise change the spelling
 * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY).
 *
 * Reserved rows are indicated with comments to help maintainability.
 */

/* PathRecord component mask bits */
/* reserved:                                                             0 */
/* reserved:                                                             1 */
#define IB_SA_PATH_REC_DGID				IB_SA_COMP_MASK( 2)
#define IB_SA_PATH_REC_SGID				IB_SA_COMP_MASK( 3)
#define IB_SA_PATH_REC_DLID				IB_SA_COMP_MASK( 4)
#define IB_SA_PATH_REC_SLID				IB_SA_COMP_MASK( 5)
#define IB_SA_PATH_REC_RAW_TRAFFIC			IB_SA_COMP_MASK( 6)
/* reserved:                                                             7 */
#define IB_SA_PATH_REC_FLOW_LABEL       		IB_SA_COMP_MASK( 8)
#define IB_SA_PATH_REC_HOP_LIMIT			IB_SA_COMP_MASK( 9)
#define IB_SA_PATH_REC_TRAFFIC_CLASS			IB_SA_COMP_MASK(10)
#define IB_SA_PATH_REC_REVERSIBLE			IB_SA_COMP_MASK(11)
#define IB_SA_PATH_REC_NUMB_PATH			IB_SA_COMP_MASK(12)
#define IB_SA_PATH_REC_PKEY				IB_SA_COMP_MASK(13)
/* reserved:                                                            14 */
#define IB_SA_PATH_REC_SL				IB_SA_COMP_MASK(15)
#define IB_SA_PATH_REC_MTU_SELECTOR			IB_SA_COMP_MASK(16)
#define IB_SA_PATH_REC_MTU				IB_SA_COMP_MASK(17)
#define IB_SA_PATH_REC_RATE_SELECTOR			IB_SA_COMP_MASK(18)
#define IB_SA_PATH_REC_RATE				IB_SA_COMP_MASK(19)
#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR	IB_SA_COMP_MASK(20)
#define IB_SA_PATH_REC_PACKET_LIFE_TIME			IB_SA_COMP_MASK(21)
#define IB_SA_PATH_REC_PREFERENCE			IB_SA_COMP_MASK(22)

/*
 * Unpacked PathRecord.  Members correspond 1:1 to the
 * IB_SA_PATH_REC_* mask bits above; "reserved" comments stand in for
 * wire fields that have no struct member.  The *_selector members
 * take enum ib_sa_selector values, rate takes enum ib_sa_rate.
 */
struct ib_sa_path_rec {
	/* reserved */
	/* reserved */
	union ib_gid dgid;
	union ib_gid sgid;
	__be16       dlid;
	__be16       slid;
	int          raw_traffic;
	/* reserved */
	__be32       flow_label;
	u8           hop_limit;
	u8           traffic_class;
	int          reversible;
	u8           numb_path;
	__be16       pkey;
	/* reserved */
	u8           sl;
	u8           mtu_selector;
	u8           mtu;
	u8           rate_selector;
	u8           rate;
	u8           packet_life_time_selector;
	u8           packet_life_time;
	u8           preference;
};
156
/* MCMemberRecord component mask bits */
#define IB_SA_MCMEMBER_REC_MGID				IB_SA_COMP_MASK( 0)
#define IB_SA_MCMEMBER_REC_PORT_GID			IB_SA_COMP_MASK( 1)
#define IB_SA_MCMEMBER_REC_QKEY				IB_SA_COMP_MASK( 2)
#define IB_SA_MCMEMBER_REC_MLID				IB_SA_COMP_MASK( 3)
#define IB_SA_MCMEMBER_REC_MTU_SELECTOR			IB_SA_COMP_MASK( 4)
#define IB_SA_MCMEMBER_REC_MTU				IB_SA_COMP_MASK( 5)
#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS		IB_SA_COMP_MASK( 6)
#define IB_SA_MCMEMBER_REC_PKEY				IB_SA_COMP_MASK( 7)
#define IB_SA_MCMEMBER_REC_RATE_SELECTOR		IB_SA_COMP_MASK( 8)
#define IB_SA_MCMEMBER_REC_RATE				IB_SA_COMP_MASK( 9)
#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR	IB_SA_COMP_MASK(10)
#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME		IB_SA_COMP_MASK(11)
#define IB_SA_MCMEMBER_REC_SL				IB_SA_COMP_MASK(12)
#define IB_SA_MCMEMBER_REC_FLOW_LABEL			IB_SA_COMP_MASK(13)
#define IB_SA_MCMEMBER_REC_HOP_LIMIT			IB_SA_COMP_MASK(14)
#define IB_SA_MCMEMBER_REC_SCOPE			IB_SA_COMP_MASK(15)
#define IB_SA_MCMEMBER_REC_JOIN_STATE			IB_SA_COMP_MASK(16)
#define IB_SA_MCMEMBER_REC_PROXY_JOIN			IB_SA_COMP_MASK(17)

/*
 * Unpacked MCMemberRecord (multicast group membership); used with
 * ib_sa_mcmember_rec_query() and the set/delete wrappers below.
 * Members correspond 1:1 to the IB_SA_MCMEMBER_REC_* mask bits.
 */
struct ib_sa_mcmember_rec {
	union ib_gid mgid;
	union ib_gid port_gid;
	__be32       qkey;
	__be16       mlid;
	u8           mtu_selector;
	u8           mtu;
	u8           traffic_class;
	__be16       pkey;
	u8 	     rate_selector;
	u8 	     rate;
	u8 	     packet_life_time_selector;
	u8 	     packet_life_time;
	u8           sl;
	__be32       flow_label;
	u8           hop_limit;
	u8           scope;
	u8           join_state;
	int          proxy_join;
};
196
/* Service Record Component Mask Sec 15.2.5.14 Ver 1.1	*/
#define IB_SA_SERVICE_REC_SERVICE_ID			IB_SA_COMP_MASK( 0)
#define IB_SA_SERVICE_REC_SERVICE_GID			IB_SA_COMP_MASK( 1)
#define IB_SA_SERVICE_REC_SERVICE_PKEY			IB_SA_COMP_MASK( 2)
/* reserved:							 3 */
#define IB_SA_SERVICE_REC_SERVICE_LEASE			IB_SA_COMP_MASK( 4)
#define IB_SA_SERVICE_REC_SERVICE_KEY			IB_SA_COMP_MASK( 5)
#define IB_SA_SERVICE_REC_SERVICE_NAME			IB_SA_COMP_MASK( 6)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_0		IB_SA_COMP_MASK( 7)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_1		IB_SA_COMP_MASK( 8)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_2		IB_SA_COMP_MASK( 9)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_3		IB_SA_COMP_MASK(10)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_4		IB_SA_COMP_MASK(11)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_5		IB_SA_COMP_MASK(12)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_6		IB_SA_COMP_MASK(13)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_7		IB_SA_COMP_MASK(14)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_8		IB_SA_COMP_MASK(15)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_9		IB_SA_COMP_MASK(16)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_10		IB_SA_COMP_MASK(17)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_11		IB_SA_COMP_MASK(18)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_12		IB_SA_COMP_MASK(19)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_13		IB_SA_COMP_MASK(20)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_14		IB_SA_COMP_MASK(21)
#define IB_SA_SERVICE_REC_SERVICE_DATA8_15		IB_SA_COMP_MASK(22)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_0		IB_SA_COMP_MASK(23)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_1		IB_SA_COMP_MASK(24)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_2		IB_SA_COMP_MASK(25)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_3		IB_SA_COMP_MASK(26)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_4		IB_SA_COMP_MASK(27)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_5		IB_SA_COMP_MASK(28)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_6		IB_SA_COMP_MASK(29)
#define IB_SA_SERVICE_REC_SERVICE_DATA16_7		IB_SA_COMP_MASK(30)
#define IB_SA_SERVICE_REC_SERVICE_DATA32_0		IB_SA_COMP_MASK(31)
#define IB_SA_SERVICE_REC_SERVICE_DATA32_1		IB_SA_COMP_MASK(32)
#define IB_SA_SERVICE_REC_SERVICE_DATA32_2		IB_SA_COMP_MASK(33)
#define IB_SA_SERVICE_REC_SERVICE_DATA32_3		IB_SA_COMP_MASK(34)
#define IB_SA_SERVICE_REC_SERVICE_DATA64_0		IB_SA_COMP_MASK(35)
#define IB_SA_SERVICE_REC_SERVICE_DATA64_1		IB_SA_COMP_MASK(36)

/* "Indefinite" service lease value */
#define IB_DEFAULT_SERVICE_LEASE 	0xFFFFFFFF

/*
 * Unpacked ServiceRecord; used with ib_sa_service_rec_query().
 * Members correspond 1:1 to the IB_SA_SERVICE_REC_* mask bits.
 */
struct ib_sa_service_rec {
	u64		id;
	union ib_gid	gid;
	__be16 		pkey;
	/* reserved */
	u32		lease;
	u8		key[16];
	u8		name[64];
	u8		data8[16];
	u16		data16[8];
	u32		data32[4];
	u64		data64[2];
};
251
/* Opaque handle for an in-flight SA query; see ib_sa_cancel_query(). */
struct ib_sa_query;

/*
 * ib_sa_cancel_query - try to cancel the outstanding SA query
 * identified by @id/@query (the values produced when the query was
 * started).
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query);

/*
 * ib_sa_path_rec_get - start a PathRecord GET query.
 * NOTE(review): parameter/callback/return conventions presumably
 * match those documented for ib_sa_mcmember_rec_set() below --
 * confirm against the implementation in ib_sa.c.
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
		       struct ib_sa_path_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, unsigned int __nocast gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_path_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **query);

/*
 * ib_sa_mcmember_rec_query - start an MCMemberRecord query using the
 * given SA @method (normally invoked via the set/delete wrappers
 * below rather than directly).
 */
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
			     u8 method,
			     struct ib_sa_mcmember_rec *rec,
			     ib_sa_comp_mask comp_mask,
			     int timeout_ms, unsigned int __nocast gfp_mask,
			     void (*callback)(int status,
					      struct ib_sa_mcmember_rec *resp,
					      void *context),
			     void *context,
			     struct ib_sa_query **query);

/*
 * ib_sa_service_rec_query - start a ServiceRecord query using the
 * given SA @method.
 */
int ib_sa_service_rec_query(struct ib_device *device, u8 port_num,
			    u8 method,
			    struct ib_sa_service_rec *rec,
			    ib_sa_comp_mask comp_mask,
			    int timeout_ms, unsigned int __nocast gfp_mask,
			    void (*callback)(int status,
					     struct ib_sa_service_rec *resp,
					     void *context),
			    void *context,
			    struct ib_sa_query **sa_query);
287
/**
 * ib_sa_mcmember_rec_set - Start an MCMember set query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:MCMember Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @query:query context, used to cancel query
 *
 * Send an MCMember Set query to the SA (eg to join a multicast
 * group).  The callback function will be called when the query
 * completes (or fails); status is 0 for a successful response, -EINTR
 * if the query is canceled, -ETIMEDOUT if the query timed out, or
 * -EIO if an error occurred sending the query.  The resp parameter of
 * the callback is only valid if status is 0.
 *
 * If the return value of ib_sa_mcmember_rec_set() is negative, it is
 * an error code.  Otherwise it is a query ID that can be used to
 * cancel the query.
 */
static inline int
ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
		       struct ib_sa_mcmember_rec *rec,
		       ib_sa_comp_mask comp_mask,
		       int timeout_ms, unsigned int __nocast gfp_mask,
		       void (*callback)(int status,
					struct ib_sa_mcmember_rec *resp,
					void *context),
		       void *context,
		       struct ib_sa_query **query)
{
	return ib_sa_mcmember_rec_query(device, port_num,
					IB_MGMT_METHOD_SET,
					rec, comp_mask,
					timeout_ms, gfp_mask, callback,
					context, query);
}
329
/**
 * ib_sa_mcmember_rec_delete - Start an MCMember delete query
 * @device:device to send query on
 * @port_num: port number to send query on
 * @rec:MCMember Record to send in query
 * @comp_mask:component mask to send in query
 * @timeout_ms:time to wait for response
 * @gfp_mask:GFP mask to use for internal allocations
 * @callback:function called when query completes, times out or is
 * canceled
 * @context:opaque user context passed to callback
 * @query:query context, used to cancel query
 *
 * Send an MCMember Delete query to the SA (eg to leave a multicast
 * group).  The callback function will be called when the query
 * completes (or fails); status is 0 for a successful response, -EINTR
 * if the query is canceled, -ETIMEDOUT if the query timed out, or
 * -EIO if an error occurred sending the query.  The resp parameter of
 * the callback is only valid if status is 0.
 *
 * If the return value of ib_sa_mcmember_rec_delete() is negative, it
 * is an error code.  Otherwise it is a query ID that can be used to
 * cancel the query.
 */
static inline int
ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
			  struct ib_sa_mcmember_rec *rec,
			  ib_sa_comp_mask comp_mask,
			  int timeout_ms, unsigned int __nocast gfp_mask,
			  void (*callback)(int status,
					   struct ib_sa_mcmember_rec *resp,
					   void *context),
			  void *context,
			  struct ib_sa_query **query)
{
	return ib_sa_mcmember_rec_query(device, port_num,
					IB_SA_METHOD_DELETE,
					rec, comp_mask,
					timeout_ms, gfp_mask, callback,
					context, query);
}
371
372
373#endif /* IB_SA_H */
diff --git a/include/rdma/ib_smi.h b/include/rdma/ib_smi.h
new file mode 100644
index 000000000000..87f60737f695
--- /dev/null
+++ b/include/rdma/ib_smi.h
@@ -0,0 +1,94 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $
37 */
38
39#if !defined( IB_SMI_H )
40#define IB_SMI_H
41
42#include <rdma/ib_mad.h>
43
/* Size of the SMP data payload and maximum directed-route path length */
#define IB_SMP_DATA_SIZE			64
#define IB_SMP_MAX_PATH_HOPS			64

/*
 * Subnet Management Packet (SMP).  Laid out to match the wire format,
 * hence the packed attribute; multi-byte fields are big-endian.
 */
struct ib_smp {
	u8	base_version;
	u8	mgmt_class;
	u8	class_version;
	u8	method;
	__be16	status;		/* includes the direction bit, IB_SMP_DIRECTION */
	u8	hop_ptr;
	u8	hop_cnt;
	__be64	tid;
	__be16	attr_id;	/* one of the IB_SMP_ATTR_* values below */
	__be16	resv;
	__be32	attr_mod;
	__be64	mkey;
	__be16	dr_slid;	/* directed-route source LID */
	__be16	dr_dlid;	/* directed-route destination LID */
	u8	reserved[28];
	u8	data[IB_SMP_DATA_SIZE];
	u8	initial_path[IB_SMP_MAX_PATH_HOPS];
	u8	return_path[IB_SMP_MAX_PATH_HOPS];
} __attribute__ ((packed));
67
/* Direction bit in the SMP status field; tested by ib_get_smp_direction() */
#define IB_SMP_DIRECTION			__constant_htons(0x8000)

/* Subnet management attributes (values of the SMP attr_id field) */
#define IB_SMP_ATTR_NOTICE			__constant_htons(0x0002)
#define IB_SMP_ATTR_NODE_DESC			__constant_htons(0x0010)
#define IB_SMP_ATTR_NODE_INFO			__constant_htons(0x0011)
#define IB_SMP_ATTR_SWITCH_INFO			__constant_htons(0x0012)
#define IB_SMP_ATTR_GUID_INFO			__constant_htons(0x0014)
#define IB_SMP_ATTR_PORT_INFO			__constant_htons(0x0015)
#define IB_SMP_ATTR_PKEY_TABLE			__constant_htons(0x0016)
#define IB_SMP_ATTR_SL_TO_VL_TABLE		__constant_htons(0x0017)
#define IB_SMP_ATTR_VL_ARB_TABLE		__constant_htons(0x0018)
#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE	__constant_htons(0x0019)
#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE	__constant_htons(0x001A)
#define IB_SMP_ATTR_MCAST_FORWARD_TABLE		__constant_htons(0x001B)
#define IB_SMP_ATTR_SM_INFO			__constant_htons(0x0020)
#define IB_SMP_ATTR_VENDOR_DIAG			__constant_htons(0x0030)
#define IB_SMP_ATTR_LED_INFO			__constant_htons(0x0031)
#define IB_SMP_ATTR_VENDOR_MASK			__constant_htons(0xFF00)
87
88static inline u8
89ib_get_smp_direction(struct ib_smp *smp)
90{
91 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
92}
93
94#endif /* IB_SMI_H */
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h
new file mode 100644
index 000000000000..72182d16778b
--- /dev/null
+++ b/include/rdma/ib_user_cm.h
@@ -0,0 +1,328 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_user_cm.h 2576 2005-06-09 17:00:30Z libor $
33 */
34
35#ifndef IB_USER_CM_H
36#define IB_USER_CM_H
37
38#include <linux/types.h>
39
40#define IB_USER_CM_ABI_VERSION 1
41
/*
 * Command codes written by userspace in the cmd field of
 * struct ib_ucm_cmd_hdr (below).
 */
enum {
	IB_USER_CM_CMD_CREATE_ID,
	IB_USER_CM_CMD_DESTROY_ID,
	IB_USER_CM_CMD_ATTR_ID,

	IB_USER_CM_CMD_LISTEN,
	IB_USER_CM_CMD_ESTABLISH,

	IB_USER_CM_CMD_SEND_REQ,
	IB_USER_CM_CMD_SEND_REP,
	IB_USER_CM_CMD_SEND_RTU,
	IB_USER_CM_CMD_SEND_DREQ,
	IB_USER_CM_CMD_SEND_DREP,
	IB_USER_CM_CMD_SEND_REJ,
	IB_USER_CM_CMD_SEND_MRA,
	IB_USER_CM_CMD_SEND_LAP,
	IB_USER_CM_CMD_SEND_APR,
	IB_USER_CM_CMD_SEND_SIDR_REQ,
	IB_USER_CM_CMD_SEND_SIDR_REP,

	IB_USER_CM_CMD_EVENT,
};
/*
 * command ABI structures.
 *
 * Only fixed-size types are used so the layout is identical for
 * 32-bit and 64-bit userspace.  NOTE(review): the __u64 "response",
 * "data" and "path" members appear to carry userspace buffer
 * pointers -- confirm against the ucm implementation.
 */

/* Header preceding every command's payload */
struct ib_ucm_cmd_hdr {
	__u32 cmd;	/* one of the IB_USER_CM_CMD_* values */
	__u16 in;	/* NOTE(review): presumably input payload size -- confirm */
	__u16 out;	/* NOTE(review): presumably response payload size -- confirm */
};

/* IB_USER_CM_CMD_CREATE_ID */
struct ib_ucm_create_id {
	__u64 response;
};

struct ib_ucm_create_id_resp {
	__u32 id;	/* CM ID used by all subsequent commands */
};

/* IB_USER_CM_CMD_DESTROY_ID */
struct ib_ucm_destroy_id {
	__u32 id;
};

/* IB_USER_CM_CMD_ATTR_ID */
struct ib_ucm_attr_id {
	__u64 response;
	__u32 id;
};

struct ib_ucm_attr_id_resp {
	__be64 service_id;
	__be64 service_mask;
	__be32 local_id;
	__be32 remote_id;
};

/* IB_USER_CM_CMD_LISTEN */
struct ib_ucm_listen {
	__be64 service_id;
	__be64 service_mask;
	__u32 id;
};

/* IB_USER_CM_CMD_ESTABLISH */
struct ib_ucm_establish {
	__u32 id;
};

/* Private-data payload shared by several SEND_* commands */
struct ib_ucm_private_data {
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

/* Userspace encoding of a path record (cf. struct ib_sa_path_rec) */
struct ib_ucm_path_rec {
	__u8  dgid[16];
	__u8  sgid[16];
	__be16 dlid;
	__be16 slid;
	__u32 raw_traffic;
	__be32 flow_label;
	__u32 reversible;
	__u32 mtu;
	__be16 pkey;
	__u8  hop_limit;
	__u8  traffic_class;
	__u8  numb_path;
	__u8  sl;
	__u8  mtu_selector;
	__u8  rate_selector;
	__u8  rate;
	__u8  packet_life_time_selector;
	__u8  packet_life_time;
	__u8  preference;
};
135
/* IB_USER_CM_CMD_SEND_REQ: connection request */
struct ib_ucm_req {
	__u32 id;
	__u32 qpn;
	__u32 qp_type;
	__u32 psn;
	__be64 sid;
	__u64 data;
	__u64 primary_path;	/* points at a struct ib_ucm_path_rec */
	__u64 alternate_path;	/* points at a struct ib_ucm_path_rec */
	__u8  len;		/* length of the private data at @data */
	__u8  peer_to_peer;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  remote_cm_response_timeout;
	__u8  flow_control;
	__u8  local_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  max_cm_retries;
	__u8  srq;
	__u8  reserved[1];
};

/* IB_USER_CM_CMD_SEND_REP: connection reply */
struct ib_ucm_rep {
	__u64 data;
	__u32 id;
	__u32 qpn;
	__u32 psn;
	__u8  len;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
};

/* Generic status + info payload (e.g. for REJ/APR-style messages) */
struct ib_ucm_info {
	__u32 id;
	__u32 status;
	__u64 info;
	__u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[2];
};

/* IB_USER_CM_CMD_SEND_MRA: message receipt acknowledgement */
struct ib_ucm_mra {
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  timeout;
	__u8  reserved[2];
};

/* IB_USER_CM_CMD_SEND_LAP: load alternate path */
struct ib_ucm_lap {
	__u64 path;
	__u64 data;
	__u32 id;
	__u8  len;
	__u8  reserved[3];
};

/* IB_USER_CM_CMD_SEND_SIDR_REQ: service ID resolution request */
struct ib_ucm_sidr_req {
	__u32 id;
	__u32 timeout;
	__be64 sid;
	__u64 data;
	__u64 path;
	__u16 pkey;
	__u8  len;
	__u8  max_cm_retries;
};

/* IB_USER_CM_CMD_SEND_SIDR_REP: service ID resolution reply */
struct ib_ucm_sidr_rep {
	__u32 id;
	__u32 qpn;
	__u32 qkey;
	__u32 status;
	__u64 info;
	__u64 data;
	__u8  info_len;
	__u8  data_len;
	__u8  reserved[2];
};
/*
 * event notification ABI structures.
 */

/* IB_USER_CM_CMD_EVENT: fetch the next event for this file */
struct ib_ucm_event_get {
	__u64 response;		/* out: receives a struct ib_ucm_event_resp */
	__u64 data;		/* out: buffer for event private data */
	__u64 info;		/* out: buffer for extra event info (ari/apr) */
	__u8  data_len;
	__u8  info_len;
	__u8  reserved[2];
};

/* Event detail for an incoming connection request (REQ) */
struct ib_ucm_req_event_resp {
	__u32 listen_id;
	/* device */
	/* port */
	struct ib_ucm_path_rec primary_path;
	struct ib_ucm_path_rec alternate_path;
	__be64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 qp_type;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  local_cm_response_timeout;
	__u8  flow_control;
	__u8  remote_cm_response_timeout;
	__u8  retry_count;
	__u8  rnr_retry_count;
	__u8  srq;
};

/* Event detail for a connection reply (REP) */
struct ib_ucm_rep_event_resp {
	__be64 remote_ca_guid;
	__u32 remote_qkey;
	__u32 remote_qpn;
	__u32 starting_psn;
	__u8  responder_resources;
	__u8  initiator_depth;
	__u8  target_ack_delay;
	__u8  failover_accepted;
	__u8  flow_control;
	__u8  rnr_retry_count;
	__u8  srq;
	__u8  reserved[1];
};

/* Event detail for a connection rejection (REJ) */
struct ib_ucm_rej_event_resp {
	__u32 reason;
	/* ari in ib_ucm_event_get info field. */
};

/* Event detail for a message receipt ack (MRA) */
struct ib_ucm_mra_event_resp {
	__u8  timeout;
	__u8  reserved[3];
};

/* Event detail for a load-alternate-path request (LAP) */
struct ib_ucm_lap_event_resp {
	struct ib_ucm_path_rec path;
};

/* Event detail for an alternate-path response (APR) */
struct ib_ucm_apr_event_resp {
	__u32 status;
	/* apr info in ib_ucm_event_get info field. */
};

/* Event detail for a SIDR request */
struct ib_ucm_sidr_req_event_resp {
	__u32 listen_id;
	/* device */
	/* port */
	__u16 pkey;
	__u8  reserved[2];
};

/* Event detail for a SIDR reply */
struct ib_ucm_sidr_rep_event_resp {
	__u32 status;
	__u32 qkey;
	__u32 qpn;
	/* info in ib_ucm_event_get info field. */
};

/* Bits of ib_ucm_event_resp.present: which optional pieces accompany
 * the event (private data, info, primary/alternate paths). */
#define IB_UCM_PRES_DATA      0x01
#define IB_UCM_PRES_INFO      0x02
#define IB_UCM_PRES_PRIMARY   0x04
#define IB_UCM_PRES_ALTERNATE 0x08

/* Event returned through ib_ucm_event_get.response */
struct ib_ucm_event_resp {
	__u32 id;
	__u32 event;
	__u32 present;	/* mask of IB_UCM_PRES_* bits */
	union {
		struct ib_ucm_req_event_resp req_resp;
		struct ib_ucm_rep_event_resp rep_resp;
		struct ib_ucm_rej_event_resp rej_resp;
		struct ib_ucm_mra_event_resp mra_resp;
		struct ib_ucm_lap_event_resp lap_resp;
		struct ib_ucm_apr_event_resp apr_resp;

		struct ib_ucm_sidr_req_event_resp sidr_req_resp;
		struct ib_ucm_sidr_rep_event_resp sidr_rep_resp;

		__u32 send_status;
	} u;
};
327
328#endif /* IB_USER_CM_H */
diff --git a/include/rdma/ib_user_mad.h b/include/rdma/ib_user_mad.h
new file mode 100644
index 000000000000..44537aa32e62
--- /dev/null
+++ b/include/rdma/ib_user_mad.h
@@ -0,0 +1,137 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_mad.h 2814 2005-07-06 19:14:09Z halr $
34 */
35
36#ifndef IB_USER_MAD_H
37#define IB_USER_MAD_H
38
39#include <linux/types.h>
40#include <linux/ioctl.h>
41
42/*
43 * Increment this value if any changes that break userspace ABI
44 * compatibility are made.
45 */
46#define IB_USER_MAD_ABI_VERSION 5
47
48/*
49 * Make sure that all structs defined in this file remain laid out so
50 * that they pack the same way on 32-bit and 64-bit architectures (to
51 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
52 */
53
/**
 * ib_user_mad_hdr - MAD packet header
 * @id - ID of agent MAD received with/to be sent with
 * @status - 0 on successful receive, ETIMEDOUT if no response
 *   received (transaction ID in data[] will be set to TID of original
 *   request) (ignored on send)
 * @timeout_ms - Milliseconds to wait for response (unset on receive)
 * @retries - Number of automatic retries to attempt
 * @length - NOTE(review): undocumented in the original; presumably
 *   the length in bytes of the MAD data -- confirm against user_mad.c
 * @qpn - Remote QP number received from/to be sent to
 * @qkey - Remote Q_Key to be sent with (unset on receive)
 * @lid - Remote lid received from/to be sent to
 * @sl - Service level received with/to be sent with
 * @path_bits - Local path bits received with/to be sent with
 * @grh_present - If set, GRH was received/should be sent
 * @gid_index - Local GID index to send with (unset on receive)
 * @hop_limit - Hop limit in GRH
 * @traffic_class - Traffic class in GRH
 * @gid - Remote GID in GRH
 * @flow_label - Flow label in GRH
 */
struct ib_user_mad_hdr {
	__u32	id;
	__u32	status;
	__u32	timeout_ms;
	__u32	retries;
	__u32	length;
	__be32	qpn;
	__be32	qkey;
	__be16	lid;
	__u8	sl;
	__u8	path_bits;
	__u8	grh_present;
	__u8	gid_index;
	__u8	hop_limit;
	__u8	traffic_class;
	__u8	gid[16];
	__be32	flow_label;
};

/**
 * ib_user_mad - MAD packet
 * @hdr - MAD packet header
 * @data - Contents of MAD
 *
 */
struct ib_user_mad {
	struct ib_user_mad_hdr hdr;
	__u8	data[0];	/* variable-length MAD contents follow the header */
};

/**
 * ib_user_mad_reg_req - MAD registration request
 * @id - Set by the kernel; used to identify agent in future requests.
 * @qpn - Queue pair number; must be 0 or 1.
 * @method_mask - The caller will receive unsolicited MADs for any method
 *   where @method_mask = 1.
 * @mgmt_class - Indicates which management class of MADs should be receive
 *   by the caller.  This field is only required if the user wishes to
 *   receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version - Indicates which version of MADs for the given
 *   management class to receive.
 * @oui - Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f. Otherwise not used.
 * @rmpp_version - If set, indicates the RMPP version used.
 *
 */
struct ib_user_mad_reg_req {
	__u32	id;
	__u32	method_mask[4];
	__u8	qpn;
	__u8	mgmt_class;
	__u8	mgmt_class_version;
	__u8	oui[3];
	__u8	rmpp_version;
};
129
130#define IB_IOCTL_MAGIC 0x1b
131
132#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
133 struct ib_user_mad_reg_req)
134
135#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32)
136
137#endif /* IB_USER_MAD_H */
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
new file mode 100644
index 000000000000..7ebb01c8f996
--- /dev/null
+++ b/include/rdma/ib_user_verbs.h
@@ -0,0 +1,422 @@
1/*
2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 Cisco Systems. All rights reserved.
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
10 *
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
13 * conditions are met:
14 *
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
17 * disclaimer.
18 *
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31 * SOFTWARE.
32 *
33 * $Id: ib_user_verbs.h 2708 2005-06-24 17:27:21Z roland $
34 */
35
36#ifndef IB_USER_VERBS_H
37#define IB_USER_VERBS_H
38
39#include <linux/types.h>
40
41/*
42 * Increment this value if any changes that break userspace ABI
43 * compatibility are made.
44 */
45#define IB_USER_VERBS_ABI_VERSION 1
46
/*
 * Command numbers understood by the userspace verbs device.  The
 * values (implicit, starting at 0) are part of the userspace ABI
 * covered by IB_USER_VERBS_ABI_VERSION above, so the order of these
 * entries must not change.
 */
enum {
	IB_USER_VERBS_CMD_QUERY_PARAMS,
	IB_USER_VERBS_CMD_GET_CONTEXT,
	IB_USER_VERBS_CMD_QUERY_DEVICE,
	IB_USER_VERBS_CMD_QUERY_PORT,
	IB_USER_VERBS_CMD_QUERY_GID,
	IB_USER_VERBS_CMD_QUERY_PKEY,
	IB_USER_VERBS_CMD_ALLOC_PD,
	IB_USER_VERBS_CMD_DEALLOC_PD,
	IB_USER_VERBS_CMD_CREATE_AH,
	IB_USER_VERBS_CMD_MODIFY_AH,
	IB_USER_VERBS_CMD_QUERY_AH,
	IB_USER_VERBS_CMD_DESTROY_AH,
	IB_USER_VERBS_CMD_REG_MR,
	IB_USER_VERBS_CMD_REG_SMR,
	IB_USER_VERBS_CMD_REREG_MR,
	IB_USER_VERBS_CMD_QUERY_MR,
	IB_USER_VERBS_CMD_DEREG_MR,
	IB_USER_VERBS_CMD_ALLOC_MW,
	IB_USER_VERBS_CMD_BIND_MW,
	IB_USER_VERBS_CMD_DEALLOC_MW,
	IB_USER_VERBS_CMD_CREATE_CQ,
	IB_USER_VERBS_CMD_RESIZE_CQ,
	IB_USER_VERBS_CMD_DESTROY_CQ,
	IB_USER_VERBS_CMD_POLL_CQ,
	IB_USER_VERBS_CMD_PEEK_CQ,
	IB_USER_VERBS_CMD_REQ_NOTIFY_CQ,
	IB_USER_VERBS_CMD_CREATE_QP,
	IB_USER_VERBS_CMD_QUERY_QP,
	IB_USER_VERBS_CMD_MODIFY_QP,
	IB_USER_VERBS_CMD_DESTROY_QP,
	IB_USER_VERBS_CMD_POST_SEND,
	IB_USER_VERBS_CMD_POST_RECV,
	IB_USER_VERBS_CMD_ATTACH_MCAST,
	IB_USER_VERBS_CMD_DETACH_MCAST,
	IB_USER_VERBS_CMD_CREATE_SRQ,
	IB_USER_VERBS_CMD_MODIFY_SRQ,
	IB_USER_VERBS_CMD_QUERY_SRQ,
	IB_USER_VERBS_CMD_DESTROY_SRQ,
	IB_USER_VERBS_CMD_POST_SRQ_RECV
};
88
/*
 * Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */

/* Payload of an event read from the async event file descriptor. */
struct ib_uverbs_async_event_desc {
	__u64 element;
	__u32 event_type;	/* enum ib_event_type */
	__u32 reserved;		/* pad to an 8-byte multiple */
};

/* Payload of an event read from a completion event file descriptor. */
struct ib_uverbs_comp_event_desc {
	__u64 cq_handle;	/* handle of the CQ the completion event is for */
};

/*
 * All commands from userspace should start with a __u32 command field
 * followed by __u16 in_words and out_words fields (which give the
 * length of the command block and response buffer if any in 32-bit
 * words).  The kernel driver will read these fields first and read
 * the rest of the command struct based on these values.
 */

struct ib_uverbs_cmd_hdr {
	__u32 command;		/* one of the IB_USER_VERBS_CMD_* values */
	__u16 in_words;		/* total command length, in 32-bit words */
	__u16 out_words;	/* response buffer length, in 32-bit words */
};

/*
 * No driver_data for "query params" command, since this is intended
 * to be a core function with no possible device dependence.
 */
struct ib_uverbs_query_params {
	__u64 response;		/* userspace address of the response struct */
};

struct ib_uverbs_query_params_resp {
	__u32 num_cq_events;
};

struct ib_uverbs_get_context {
	__u64 response;
	__u64 cq_fd_tab;
	__u64 driver_data[0];	/* variable-length device-private trailer */
};

struct ib_uverbs_get_context_resp {
	__u32 async_fd;		/* fd for reading async events */
	__u32 reserved;
};
143
struct ib_uverbs_query_device {
	__u64 response;		/* userspace address for struct ib_uverbs_query_device_resp */
	__u64 driver_data[0];
};

/*
 * Device limits returned to userspace; fixed-width mirror of
 * struct ib_device_attr (see <rdma/ib_verbs.h>).
 */
struct ib_uverbs_query_device_resp {
	__u64 fw_ver;
	__be64 node_guid;
	__be64 sys_image_guid;
	__u64 max_mr_size;
	__u64 page_size_cap;
	__u32 vendor_id;
	__u32 vendor_part_id;
	__u32 hw_ver;
	__u32 max_qp;
	__u32 max_qp_wr;
	__u32 device_cap_flags;
	__u32 max_sge;
	__u32 max_sge_rd;
	__u32 max_cq;
	__u32 max_cqe;
	__u32 max_mr;
	__u32 max_pd;
	__u32 max_qp_rd_atom;
	__u32 max_ee_rd_atom;
	__u32 max_res_rd_atom;
	__u32 max_qp_init_rd_atom;
	__u32 max_ee_init_rd_atom;
	__u32 atomic_cap;	/* enum ib_atomic_cap */
	__u32 max_ee;
	__u32 max_rdd;
	__u32 max_mw;
	__u32 max_raw_ipv6_qp;
	__u32 max_raw_ethy_qp;
	__u32 max_mcast_grp;
	__u32 max_mcast_qp_attach;
	__u32 max_total_mcast_qp_attach;
	__u32 max_ah;
	__u32 max_fmr;
	__u32 max_map_per_fmr;
	__u32 max_srq;
	__u32 max_srq_wr;
	__u32 max_srq_sge;
	__u16 max_pkeys;
	__u8  local_ca_ack_delay;
	__u8  phys_port_cnt;
	__u8  reserved[4];	/* pad to an 8-byte multiple */
};
192
struct ib_uverbs_query_port {
	__u64 response;
	__u8  port_num;		/* port to query */
	__u8  reserved[7];	/* pad to an 8-byte multiple */
	__u64 driver_data[0];
};

/* Fixed-width mirror of struct ib_port_attr (see <rdma/ib_verbs.h>). */
struct ib_uverbs_query_port_resp {
	__u32 port_cap_flags;
	__u32 max_msg_sz;
	__u32 bad_pkey_cntr;
	__u32 qkey_viol_cntr;
	__u32 gid_tbl_len;
	__u16 pkey_tbl_len;
	__u16 lid;
	__u16 sm_lid;
	__u8  state;		/* enum ib_port_state */
	__u8  max_mtu;		/* enum ib_mtu */
	__u8  active_mtu;	/* enum ib_mtu */
	__u8  lmc;
	__u8  max_vl_num;
	__u8  sm_sl;
	__u8  subnet_timeout;
	__u8  init_type_reply;
	__u8  active_width;	/* enum ib_port_width */
	__u8  active_speed;
	__u8  phys_state;
	__u8  reserved[3];
};

struct ib_uverbs_query_gid {
	__u64 response;
	__u8  port_num;
	__u8  index;		/* index into the port's GID table */
	__u8  reserved[6];
	__u64 driver_data[0];
};

struct ib_uverbs_query_gid_resp {
	__u8 gid[16];
};

struct ib_uverbs_query_pkey {
	__u64 response;
	__u8  port_num;
	__u8  index;		/* index into the port's P_Key table */
	__u8  reserved[6];
	__u64 driver_data[0];
};

struct ib_uverbs_query_pkey_resp {
	__u16 pkey;
	__u16 reserved;
};
247
struct ib_uverbs_alloc_pd {
	__u64 response;
	__u64 driver_data[0];
};

struct ib_uverbs_alloc_pd_resp {
	__u32 pd_handle;	/* kernel-assigned handle used in later commands */
};

struct ib_uverbs_dealloc_pd {
	__u32 pd_handle;
};

struct ib_uverbs_reg_mr {
	__u64 response;
	__u64 start;		/* start of the region in user virtual memory */
	__u64 length;		/* region length in bytes */
	__u64 hca_va;		/* NOTE(review): presumably the virtual address the HCA maps the region at — confirm */
	__u32 pd_handle;
	__u32 access_flags;
	__u64 driver_data[0];
};

struct ib_uverbs_reg_mr_resp {
	__u32 mr_handle;
	__u32 lkey;
	__u32 rkey;
};

struct ib_uverbs_dereg_mr {
	__u32 mr_handle;
};

struct ib_uverbs_create_cq {
	__u64 response;
	__u64 user_handle;	/* opaque userspace handle for this CQ */
	__u32 cqe;		/* requested number of CQ entries */
	__u32 event_handler;
	__u64 driver_data[0];
};

struct ib_uverbs_create_cq_resp {
	__u32 cq_handle;
	__u32 cqe;		/* CQE count actually provided */
};

struct ib_uverbs_destroy_cq {
	__u32 cq_handle;
};
297
struct ib_uverbs_create_qp {
	__u64 response;
	__u64 user_handle;	/* opaque userspace handle for this QP */
	__u32 pd_handle;
	__u32 send_cq_handle;
	__u32 recv_cq_handle;
	__u32 srq_handle;	/* only meaningful when is_srq is set */
	__u32 max_send_wr;
	__u32 max_recv_wr;
	__u32 max_send_sge;
	__u32 max_recv_sge;
	__u32 max_inline_data;
	__u8  sq_sig_all;
	__u8  qp_type;		/* enum ib_qp_type */
	__u8  is_srq;
	__u8  reserved;
	__u64 driver_data[0];
};

struct ib_uverbs_create_qp_resp {
	__u32 qp_handle;
	__u32 qpn;		/* assigned queue pair number */
};

/*
 * This struct needs to remain a multiple of 8 bytes to keep the
 * alignment of the modify QP parameters.
 */
struct ib_uverbs_qp_dest {
	__u8  dgid[16];
	__u32 flow_label;
	__u16 dlid;
	__u16 reserved;
	__u8  sgid_index;
	__u8  hop_limit;
	__u8  traffic_class;
	__u8  sl;
	__u8  src_path_bits;
	__u8  static_rate;
	__u8  is_global;
	__u8  port_num;
};
340
/* Fixed-width mirror of struct ib_qp_attr (see <rdma/ib_verbs.h>). */
struct ib_uverbs_modify_qp {
	struct ib_uverbs_qp_dest dest;		/* primary path */
	struct ib_uverbs_qp_dest alt_dest;	/* alternate path */
	__u32 qp_handle;
	__u32 attr_mask;	/* which attributes below are valid (enum ib_qp_attr_mask) */
	__u32 qkey;
	__u32 rq_psn;
	__u32 sq_psn;
	__u32 dest_qp_num;
	__u32 qp_access_flags;
	__u16 pkey_index;
	__u16 alt_pkey_index;
	__u8  qp_state;
	__u8  cur_qp_state;
	__u8  path_mtu;
	__u8  path_mig_state;
	__u8  en_sqd_async_notify;
	__u8  max_rd_atomic;
	__u8  max_dest_rd_atomic;
	__u8  min_rnr_timer;
	__u8  port_num;
	__u8  timeout;
	__u8  retry_cnt;
	__u8  rnr_retry;
	__u8  alt_port_num;
	__u8  alt_timeout;
	__u8  reserved[2];	/* pad to an 8-byte multiple */
	__u64 driver_data[0];
};

/* Intentionally empty: modify QP carries no response payload. */
struct ib_uverbs_modify_qp_resp {
};

struct ib_uverbs_destroy_qp {
	__u32 qp_handle;
};
377
struct ib_uverbs_attach_mcast {
	__u8  gid[16];		/* multicast group GID */
	__u32 qp_handle;
	__u16 mlid;		/* multicast LID */
	__u16 reserved;
	__u64 driver_data[0];
};

/* Same layout as ib_uverbs_attach_mcast; detaches the QP from the group. */
struct ib_uverbs_detach_mcast {
	__u8  gid[16];
	__u32 qp_handle;
	__u16 mlid;
	__u16 reserved;
	__u64 driver_data[0];
};
393
struct ib_uverbs_create_srq {
	__u64 response;
	__u64 user_handle;	/* opaque userspace handle for this SRQ */
	__u32 pd_handle;
	__u32 max_wr;
	__u32 max_sge;
	__u32 srq_limit;
	__u64 driver_data[0];
};

struct ib_uverbs_create_srq_resp {
	__u32 srq_handle;
};

struct ib_uverbs_modify_srq {
	__u32 srq_handle;
	__u32 attr_mask;	/* which attributes are valid (enum ib_srq_attr_mask) */
	__u32 max_wr;
	__u32 max_sge;
	__u32 srq_limit;
	__u32 reserved;		/* pad to an 8-byte multiple */
	__u64 driver_data[0];
};

struct ib_uverbs_destroy_srq {
	__u32 srq_handle;
};
421
422#endif /* IB_USER_VERBS_H */
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
new file mode 100644
index 000000000000..e16cf94870f2
--- /dev/null
+++ b/include/rdma/ib_verbs.h
@@ -0,0 +1,1461 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005 Cisco Systems. All rights reserved.
9 *
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
15 *
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
18 * conditions are met:
19 *
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer.
23 *
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
28 *
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 * SOFTWARE.
37 *
38 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
39 */
40
41#if !defined(IB_VERBS_H)
42#define IB_VERBS_H
43
44#include <linux/types.h>
45#include <linux/device.h>
46
47#include <asm/atomic.h>
48#include <asm/scatterlist.h>
49#include <asm/uaccess.h>
50
/* 128-bit InfiniBand GID: raw bytes or subnet prefix + interface id. */
union ib_gid {
	u8	raw[16];
	struct {
		__be64	subnet_prefix;
		__be64	interface_id;
	} global;
};

enum ib_node_type {
	IB_NODE_CA 	= 1,
	IB_NODE_SWITCH,
	IB_NODE_ROUTER
};

/* Bits for ib_device_attr.device_cap_flags. */
enum ib_device_cap_flags {
	IB_DEVICE_RESIZE_MAX_WR		= 1,
	IB_DEVICE_BAD_PKEY_CNTR		= (1<<1),
	IB_DEVICE_BAD_QKEY_CNTR		= (1<<2),
	IB_DEVICE_RAW_MULTI		= (1<<3),
	IB_DEVICE_AUTO_PATH_MIG		= (1<<4),
	IB_DEVICE_CHANGE_PHY_PORT	= (1<<5),
	IB_DEVICE_UD_AV_PORT_ENFORCE	= (1<<6),
	IB_DEVICE_CURR_QP_STATE_MOD	= (1<<7),
	IB_DEVICE_SHUTDOWN_PORT		= (1<<8),
	IB_DEVICE_INIT_TYPE		= (1<<9),
	IB_DEVICE_PORT_ACTIVE_EVENT	= (1<<10),
	IB_DEVICE_SYS_IMAGE_GUID	= (1<<11),
	IB_DEVICE_RC_RNR_NAK_GEN	= (1<<12),
	IB_DEVICE_SRQ_RESIZE		= (1<<13),
	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
};

/* Level of atomic operation support (see ib_device_attr.atomic_cap). */
enum ib_atomic_cap {
	IB_ATOMIC_NONE,
	IB_ATOMIC_HCA,
	IB_ATOMIC_GLOB
};
88
/* Device capabilities and limits, filled in by ->query_device(). */
struct ib_device_attr {
	u64			fw_ver;
	__be64			node_guid;
	__be64			sys_image_guid;
	u64			max_mr_size;
	u64			page_size_cap;
	u32			vendor_id;
	u32			vendor_part_id;
	u32			hw_ver;
	int			max_qp;
	int			max_qp_wr;
	int			device_cap_flags;	/* enum ib_device_cap_flags bits */
	int			max_sge;
	int			max_sge_rd;
	int			max_cq;
	int			max_cqe;
	int			max_mr;
	int			max_pd;
	int			max_qp_rd_atom;
	int			max_ee_rd_atom;
	int			max_res_rd_atom;
	int			max_qp_init_rd_atom;
	int			max_ee_init_rd_atom;
	enum ib_atomic_cap	atomic_cap;
	int			max_ee;
	int			max_rdd;
	int			max_mw;
	int			max_raw_ipv6_qp;
	int			max_raw_ethy_qp;
	int			max_mcast_grp;
	int			max_mcast_qp_attach;
	int			max_total_mcast_qp_attach;
	int			max_ah;
	int			max_fmr;
	int			max_map_per_fmr;
	int			max_srq;
	int			max_srq_wr;
	int			max_srq_sge;
	u16			max_pkeys;
	u8			local_ca_ack_delay;
};
130
enum ib_mtu {
	IB_MTU_256  = 1,
	IB_MTU_512  = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

/*
 * Convert an ib_mtu encoding to the MTU in bytes.
 * Returns -1 for a value outside the defined range.
 */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
	if (mtu < IB_MTU_256 || mtu > IB_MTU_4096)
		return -1;
	/* Each successive encoding doubles the MTU, starting at 256. */
	return 256 << (mtu - IB_MTU_256);
}
150
enum ib_port_state {
	IB_PORT_NOP		= 0,
	IB_PORT_DOWN		= 1,
	IB_PORT_INIT		= 2,
	IB_PORT_ARMED		= 3,
	IB_PORT_ACTIVE		= 4,
	IB_PORT_ACTIVE_DEFER	= 5
};

/*
 * Bits for ib_port_attr.port_cap_flags.
 * NOTE(review): bit positions appear to follow the IB PortInfo
 * CapabilityMask layout (bit 0 and bits 13-15 unused) — confirm
 * against the IBTA spec.
 */
enum ib_port_cap_flags {
	IB_PORT_SM				= 1 <<  1,
	IB_PORT_NOTICE_SUP			= 1 <<  2,
	IB_PORT_TRAP_SUP			= 1 <<  3,
	IB_PORT_OPT_IPD_SUP			= 1 <<  4,
	IB_PORT_AUTO_MIGR_SUP			= 1 <<  5,
	IB_PORT_SL_MAP_SUP			= 1 <<  6,
	IB_PORT_MKEY_NVRAM			= 1 <<  7,
	IB_PORT_PKEY_NVRAM			= 1 <<  8,
	IB_PORT_LED_INFO_SUP			= 1 <<  9,
	IB_PORT_SM_DISABLED			= 1 << 10,
	IB_PORT_SYS_IMAGE_GUID_SUP		= 1 << 11,
	IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP	= 1 << 12,
	IB_PORT_CM_SUP				= 1 << 16,
	IB_PORT_SNMP_TUNNEL_SUP			= 1 << 17,
	IB_PORT_REINIT_SUP			= 1 << 18,
	IB_PORT_DEVICE_MGMT_SUP			= 1 << 19,
	IB_PORT_VENDOR_CLASS_SUP		= 1 << 20,
	IB_PORT_DR_NOTICE_SUP			= 1 << 21,
	IB_PORT_CAP_MASK_NOTICE_SUP		= 1 << 22,
	IB_PORT_BOOT_MGMT_SUP			= 1 << 23,
	IB_PORT_LINK_LATENCY_SUP		= 1 << 24,
	IB_PORT_CLIENT_REG_SUP			= 1 << 25
};
184
enum ib_port_width {
	IB_WIDTH_1X	= 1,
	IB_WIDTH_4X	= 2,
	IB_WIDTH_8X	= 4,
	IB_WIDTH_12X	= 8
};

/*
 * Convert a link-width encoding to the lane count.
 * Returns -1 for an unrecognized encoding.
 */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
	if (width == IB_WIDTH_1X)
		return 1;
	if (width == IB_WIDTH_4X)
		return 4;
	if (width == IB_WIDTH_8X)
		return 8;
	if (width == IB_WIDTH_12X)
		return 12;
	return -1;
}
202
/* Per-port attributes, filled in by ->query_port(). */
struct ib_port_attr {
	enum ib_port_state	state;
	enum ib_mtu		max_mtu;
	enum ib_mtu		active_mtu;
	int			gid_tbl_len;
	u32			port_cap_flags;	/* enum ib_port_cap_flags bits */
	u32			max_msg_sz;
	u32			bad_pkey_cntr;
	u32			qkey_viol_cntr;
	u16			pkey_tbl_len;
	u16			lid;
	u16			sm_lid;
	u8			lmc;
	u8			max_vl_num;
	u8			sm_sl;
	u8			subnet_timeout;
	u8			init_type_reply;
	u8			active_width;	/* enum ib_port_width */
	u8			active_speed;
	u8			phys_state;
};

enum ib_device_modify_flags {
	IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
};

struct ib_device_modify {
	u64	sys_image_guid;
};

/* Note: bit 1 is unused here in the original definition. */
enum ib_port_modify_flags {
	IB_PORT_SHUTDOWN	= 1,
	IB_PORT_INIT_TYPE	= (1<<2),
	IB_PORT_RESET_QKEY_CNTR	= (1<<3)
};

struct ib_port_modify {
	u32	set_port_cap_mask;
	u32	clr_port_cap_mask;
	u8	init_type;
};
244
enum ib_event_type {
	IB_EVENT_CQ_ERR,
	IB_EVENT_QP_FATAL,
	IB_EVENT_QP_REQ_ERR,
	IB_EVENT_QP_ACCESS_ERR,
	IB_EVENT_COMM_EST,
	IB_EVENT_SQ_DRAINED,
	IB_EVENT_PATH_MIG,
	IB_EVENT_PATH_MIG_ERR,
	IB_EVENT_DEVICE_FATAL,
	IB_EVENT_PORT_ACTIVE,
	IB_EVENT_PORT_ERR,
	IB_EVENT_LID_CHANGE,
	IB_EVENT_PKEY_CHANGE,
	IB_EVENT_SM_CHANGE,
	IB_EVENT_SRQ_ERR,
	IB_EVENT_SRQ_LIMIT_REACHED,
	IB_EVENT_QP_LAST_WQE_REACHED
};

/* Asynchronous event; which union member is valid depends on @event. */
struct ib_event {
	struct ib_device	*device;
	union {
		struct ib_cq	*cq;
		struct ib_qp	*qp;
		struct ib_srq	*srq;
		u8		port_num;
	} element;
	enum ib_event_type	event;
};

struct ib_event_handler {
	struct ib_device *device;
	void            (*handler)(struct ib_event_handler *, struct ib_event *);
	struct list_head  list;
};

/* Initialize an ib_event_handler before registering it. */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)		\
	do {							\
		(_ptr)->device  = _device;			\
		(_ptr)->handler = _handler;			\
		INIT_LIST_HEAD(&(_ptr)->list);			\
	} while (0)
288
/* GRH routing information used when ib_ah_attr.ah_flags has IB_AH_GRH set. */
struct ib_global_route {
	union ib_gid	dgid;
	u32		flow_label;
	u8		sgid_index;
	u8		hop_limit;
	u8		traffic_class;
};

/* On-the-wire Global Routing Header layout. */
struct ib_grh {
	__be32		version_tclass_flow;
	__be16		paylen;
	u8		next_hdr;
	u8		hop_limit;
	union ib_gid	sgid;
	union ib_gid	dgid;
};

enum {
	IB_MULTICAST_QPN = 0xffffff
};

#define IB_LID_PERMISSIVE	__constant_htons(0xFFFF)

enum ib_ah_flags {
	IB_AH_GRH	= 1
};

/* Address handle attributes (address vector). */
struct ib_ah_attr {
	struct ib_global_route	grh;
	u16			dlid;
	u8			sl;
	u8			src_path_bits;
	u8			static_rate;
	u8			ah_flags;	/* enum ib_ah_flags bits */
	u8			port_num;
};
325
/* Completion status reported in ib_wc.status. */
enum ib_wc_status {
	IB_WC_SUCCESS,
	IB_WC_LOC_LEN_ERR,
	IB_WC_LOC_QP_OP_ERR,
	IB_WC_LOC_EEC_OP_ERR,
	IB_WC_LOC_PROT_ERR,
	IB_WC_WR_FLUSH_ERR,
	IB_WC_MW_BIND_ERR,
	IB_WC_BAD_RESP_ERR,
	IB_WC_LOC_ACCESS_ERR,
	IB_WC_REM_INV_REQ_ERR,
	IB_WC_REM_ACCESS_ERR,
	IB_WC_REM_OP_ERR,
	IB_WC_RETRY_EXC_ERR,
	IB_WC_RNR_RETRY_EXC_ERR,
	IB_WC_LOC_RDD_VIOL_ERR,
	IB_WC_REM_INV_RD_REQ_ERR,
	IB_WC_REM_ABORT_ERR,
	IB_WC_INV_EECN_ERR,
	IB_WC_INV_EEC_STATE_ERR,
	IB_WC_FATAL_ERR,
	IB_WC_RESP_TIMEOUT_ERR,
	IB_WC_GENERAL_ERR
};

enum ib_wc_opcode {
	IB_WC_SEND,
	IB_WC_RDMA_WRITE,
	IB_WC_RDMA_READ,
	IB_WC_COMP_SWAP,
	IB_WC_FETCH_ADD,
	IB_WC_BIND_MW,
/*
 * Set value of IB_WC_RECV so consumers can test if a completion is a
 * receive by testing (opcode & IB_WC_RECV).
 */
	IB_WC_RECV			= 1 << 7,
	IB_WC_RECV_RDMA_WITH_IMM
};

/* Bits for ib_wc.wc_flags. */
enum ib_wc_flags {
	IB_WC_GRH		= 1,
	IB_WC_WITH_IMM		= (1<<1)
};
370
/* Work completion, returned by ->poll_cq(). */
struct ib_wc {
	u64			wr_id;		/* wr_id of the completed work request */
	enum ib_wc_status	status;
	enum ib_wc_opcode	opcode;
	u32			vendor_err;
	u32			byte_len;
	__be32			imm_data;	/* valid when IB_WC_WITH_IMM is set */
	u32			qp_num;
	u32			src_qp;
	int			wc_flags;	/* enum ib_wc_flags bits */
	u16			pkey_index;
	u16			slid;
	u8			sl;
	u8			dlid_path_bits;
	u8			port_num;	/* valid only for DR SMPs on switches */
};

enum ib_cq_notify {
	IB_CQ_SOLICITED,
	IB_CQ_NEXT_COMP
};

/* Which ib_srq_attr fields a modify SRQ operation should apply. */
enum ib_srq_attr_mask {
	IB_SRQ_MAX_WR	= 1 << 0,
	IB_SRQ_LIMIT	= 1 << 1,
};

struct ib_srq_attr {
	u32	max_wr;
	u32	max_sge;
	u32	srq_limit;
};

struct ib_srq_init_attr {
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	struct ib_srq_attr	attr;
};

/* Work-request capacities of a QP's send and receive queues. */
struct ib_qp_cap {
	u32	max_send_wr;
	u32	max_recv_wr;
	u32	max_send_sge;
	u32	max_recv_sge;
	u32	max_inline_data;
};
417
enum ib_sig_type {
	IB_SIGNAL_ALL_WR,
	IB_SIGNAL_REQ_WR
};

enum ib_qp_type {
	/*
	 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
	 * here (and in that order) since the MAD layer uses them as
	 * indices into a 2-entry table.
	 */
	IB_QPT_SMI,
	IB_QPT_GSI,

	IB_QPT_RC,
	IB_QPT_UC,
	IB_QPT_UD,
	IB_QPT_RAW_IPV6,
	IB_QPT_RAW_ETY
};

struct ib_qp_init_attr {
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_qp_cap	cap;
	enum ib_sig_type	sq_sig_type;
	enum ib_qp_type		qp_type;
	u8			port_num; /* special QP types only */
};

/*
 * RNR NAK timer field encodings; the name gives the delay in
 * milliseconds (e.g. IB_RNR_TIMER_000_16 is 0.16 ms,
 * IB_RNR_TIMER_655_36 is 655.36 ms).
 */
enum ib_rnr_timeout {
	IB_RNR_TIMER_655_36 =  0,
	IB_RNR_TIMER_000_01 =  1,
	IB_RNR_TIMER_000_02 =  2,
	IB_RNR_TIMER_000_03 =  3,
	IB_RNR_TIMER_000_04 =  4,
	IB_RNR_TIMER_000_06 =  5,
	IB_RNR_TIMER_000_08 =  6,
	IB_RNR_TIMER_000_12 =  7,
	IB_RNR_TIMER_000_16 =  8,
	IB_RNR_TIMER_000_24 =  9,
	IB_RNR_TIMER_000_32 = 10,
	IB_RNR_TIMER_000_48 = 11,
	IB_RNR_TIMER_000_64 = 12,
	IB_RNR_TIMER_000_96 = 13,
	IB_RNR_TIMER_001_28 = 14,
	IB_RNR_TIMER_001_92 = 15,
	IB_RNR_TIMER_002_56 = 16,
	IB_RNR_TIMER_003_84 = 17,
	IB_RNR_TIMER_005_12 = 18,
	IB_RNR_TIMER_007_68 = 19,
	IB_RNR_TIMER_010_24 = 20,
	IB_RNR_TIMER_015_36 = 21,
	IB_RNR_TIMER_020_48 = 22,
	IB_RNR_TIMER_030_72 = 23,
	IB_RNR_TIMER_040_96 = 24,
	IB_RNR_TIMER_061_44 = 25,
	IB_RNR_TIMER_081_92 = 26,
	IB_RNR_TIMER_122_88 = 27,
	IB_RNR_TIMER_163_84 = 28,
	IB_RNR_TIMER_245_76 = 29,
	IB_RNR_TIMER_327_68 = 30,
	IB_RNR_TIMER_491_52 = 31
};
485
/* Mask bits saying which ib_qp_attr fields a modify QP should apply. */
enum ib_qp_attr_mask {
	IB_QP_STATE			= 1,
	IB_QP_CUR_STATE			= (1<<1),
	IB_QP_EN_SQD_ASYNC_NOTIFY	= (1<<2),
	IB_QP_ACCESS_FLAGS		= (1<<3),
	IB_QP_PKEY_INDEX		= (1<<4),
	IB_QP_PORT			= (1<<5),
	IB_QP_QKEY			= (1<<6),
	IB_QP_AV			= (1<<7),
	IB_QP_PATH_MTU			= (1<<8),
	IB_QP_TIMEOUT			= (1<<9),
	IB_QP_RETRY_CNT			= (1<<10),
	IB_QP_RNR_RETRY			= (1<<11),
	IB_QP_RQ_PSN			= (1<<12),
	IB_QP_MAX_QP_RD_ATOMIC		= (1<<13),
	IB_QP_ALT_PATH			= (1<<14),
	IB_QP_MIN_RNR_TIMER		= (1<<15),
	IB_QP_SQ_PSN			= (1<<16),
	IB_QP_MAX_DEST_RD_ATOMIC	= (1<<17),
	IB_QP_PATH_MIG_STATE		= (1<<18),
	IB_QP_CAP			= (1<<19),
	IB_QP_DEST_QPN			= (1<<20)
};

enum ib_qp_state {
	IB_QPS_RESET,
	IB_QPS_INIT,
	IB_QPS_RTR,
	IB_QPS_RTS,
	IB_QPS_SQD,
	IB_QPS_SQE,
	IB_QPS_ERR
};

enum ib_mig_state {
	IB_MIG_MIGRATED,
	IB_MIG_REARM,
	IB_MIG_ARMED
};

/* QP attributes for modify/query QP; validity governed by the attr mask. */
struct ib_qp_attr {
	enum ib_qp_state	qp_state;
	enum ib_qp_state	cur_qp_state;
	enum ib_mtu		path_mtu;
	enum ib_mig_state	path_mig_state;
	u32			qkey;
	u32			rq_psn;
	u32			sq_psn;
	u32			dest_qp_num;
	int			qp_access_flags;
	struct ib_qp_cap	cap;
	struct ib_ah_attr	ah_attr;
	struct ib_ah_attr	alt_ah_attr;
	u16			pkey_index;
	u16			alt_pkey_index;
	u8			en_sqd_async_notify;
	u8			sq_draining;
	u8			max_rd_atomic;
	u8			max_dest_rd_atomic;
	u8			min_rnr_timer;
	u8			port_num;
	u8			timeout;
	u8			retry_cnt;
	u8			rnr_retry;
	u8			alt_port_num;
	u8			alt_timeout;
};
553
enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

/* Bits for ib_send_wr.send_flags. */
enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};

/* Scatter/gather element. */
struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

/*
 * Send work request.  The union member that applies is selected by
 * @opcode (rdma for RDMA ops, atomic for atomic ops, ud for UD sends).
 */
struct ib_send_wr {
	struct ib_send_wr      *next;		/* singly-linked WR list */
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;	/* enum ib_send_flags bits */
	__be32			imm_data;
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			struct ib_mad_hdr *mad_hdr;
			u32	remote_qpn;
			u32	remote_qkey;
			int	timeout_ms; /* valid for MADs only */
			int	retries;    /* valid for MADs only */
			u16	pkey_index; /* valid for GSI only */
			u8	port_num;   /* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

/* Receive work request. */
struct ib_recv_wr {
	struct ib_recv_wr      *next;		/* singly-linked WR list */
	u64			wr_id;
	struct ib_sge	       *sg_list;
	int			num_sge;
};
615
/* Memory region / memory window access permission bits. */
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

/* One physical buffer in a physical MR registration. */
struct ib_phys_buf {
	u64	addr;
	u64	size;
};

struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;	/* enum ib_access_flags bits */
	u32		lkey;
	u32		rkey;
};

/* Which aspects of an MR a rereg operation changes. */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/* Parameters for binding a memory window to an MR. */
struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;
	u64		addr;
	u32		length;
	int		send_flags;
	int		mw_access_flags;
};

struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_size;
};
658
/*
 * Per-process userspace verbs context: tracks all objects created by
 * a userspace process so they can be cleaned up, guarded by @lock.
 */
struct ib_ucontext {
	struct ib_device       *device;
	struct list_head	pd_list;
	struct list_head	mr_list;
	struct list_head	mw_list;
	struct list_head	cq_list;
	struct list_head	qp_list;
	struct list_head	srq_list;
	struct list_head	ah_list;
	spinlock_t		lock;
};

struct ib_uobject {
	u64			user_handle;	/* handle given to us by userspace */
	struct ib_ucontext     *context;	/* associated user context */
	struct list_head	list;		/* link to context's list */
	u32			id;		/* index into kernel idr */
};

/* A pinned region of userspace memory, split into chunks below. */
struct ib_umem {
	unsigned long		user_base;
	unsigned long		virt_base;
	size_t			length;
	int			offset;
	int			page_size;
	int			writable;
	struct list_head	chunk_list;
};

struct ib_umem_chunk {
	struct list_head	list;
	int                     nents;
	int                     nmap;
	struct scatterlist      page_list[0];	/* nents entries follow */
};

/* Buffers for passing command/response data between user and kernel. */
struct ib_udata {
	void __user *inbuf;
	void __user *outbuf;
	size_t       inlen;
	size_t       outlen;
};

/* Max page_list entries that fit in one page-sized ib_umem_chunk. */
#define IB_UMEM_MAX_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) /	\
	 ((void *) &((struct ib_umem_chunk *) 0)->page_list[1] -	\
	  (void *) &((struct ib_umem_chunk *) 0)->page_list[0]))

struct ib_umem_object {
	struct ib_uobject	uobject;
	struct ib_umem		umem;
};
711
/* Protection domain. */
struct ib_pd {
	struct ib_device       *device;
	struct ib_uobject      *uobject;	/* NULL for kernel-owned objects */
	atomic_t          	usecnt; /* count all resources */
};

/* Address handle. */
struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
};

typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

/* Completion queue. */
struct ib_cq {
	struct ib_device       *device;
	struct ib_uobject      *uobject;
	ib_comp_handler   	comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void *            	cq_context;
	int               	cqe;
	atomic_t          	usecnt; /* count number of work queues */
};

/* Shared receive queue. */
struct ib_srq {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_uobject      *uobject;
	void		      (*event_handler)(struct ib_event *, void *);
	void		       *srq_context;
	atomic_t		usecnt;
};

/* Queue pair. */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	struct ib_uobject      *uobject;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

/* Memory region. */
struct ib_mr {
	struct ib_device  *device;
	struct ib_pd	  *pd;
	struct ib_uobject *uobject;
	u32		   lkey;
	u32		   rkey;
	atomic_t	   usecnt; /* count number of MWs */
};

/* Memory window. */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct ib_uobject	*uobject;
	u32			rkey;
};

/* Fast memory region. */
struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};
781
struct ib_mad;
struct ib_grh;

/* Flags passed to the ->process_mad() device method. */
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

/* Result bits returned by ->process_mad(). */
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

/* Per-device cache of P_Key and GID tables, kept fresh via its event handler. */
struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
};
806
807struct ib_device {
808 struct device *dma_device;
809
810 char name[IB_DEVICE_NAME_MAX];
811
812 struct list_head event_handler_list;
813 spinlock_t event_handler_lock;
814
815 struct list_head core_list;
816 struct list_head client_data_list;
817 spinlock_t client_data_lock;
818
819 struct ib_cache cache;
820
821 u32 flags;
822
823 int (*query_device)(struct ib_device *device,
824 struct ib_device_attr *device_attr);
825 int (*query_port)(struct ib_device *device,
826 u8 port_num,
827 struct ib_port_attr *port_attr);
828 int (*query_gid)(struct ib_device *device,
829 u8 port_num, int index,
830 union ib_gid *gid);
831 int (*query_pkey)(struct ib_device *device,
832 u8 port_num, u16 index, u16 *pkey);
833 int (*modify_device)(struct ib_device *device,
834 int device_modify_mask,
835 struct ib_device_modify *device_modify);
836 int (*modify_port)(struct ib_device *device,
837 u8 port_num, int port_modify_mask,
838 struct ib_port_modify *port_modify);
839 struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device,
840 struct ib_udata *udata);
841 int (*dealloc_ucontext)(struct ib_ucontext *context);
842 int (*mmap)(struct ib_ucontext *context,
843 struct vm_area_struct *vma);
844 struct ib_pd * (*alloc_pd)(struct ib_device *device,
845 struct ib_ucontext *context,
846 struct ib_udata *udata);
847 int (*dealloc_pd)(struct ib_pd *pd);
848 struct ib_ah * (*create_ah)(struct ib_pd *pd,
849 struct ib_ah_attr *ah_attr);
850 int (*modify_ah)(struct ib_ah *ah,
851 struct ib_ah_attr *ah_attr);
852 int (*query_ah)(struct ib_ah *ah,
853 struct ib_ah_attr *ah_attr);
854 int (*destroy_ah)(struct ib_ah *ah);
855 struct ib_srq * (*create_srq)(struct ib_pd *pd,
856 struct ib_srq_init_attr *srq_init_attr,
857 struct ib_udata *udata);
858 int (*modify_srq)(struct ib_srq *srq,
859 struct ib_srq_attr *srq_attr,
860 enum ib_srq_attr_mask srq_attr_mask);
861 int (*query_srq)(struct ib_srq *srq,
862 struct ib_srq_attr *srq_attr);
863 int (*destroy_srq)(struct ib_srq *srq);
864 int (*post_srq_recv)(struct ib_srq *srq,
865 struct ib_recv_wr *recv_wr,
866 struct ib_recv_wr **bad_recv_wr);
867 struct ib_qp * (*create_qp)(struct ib_pd *pd,
868 struct ib_qp_init_attr *qp_init_attr,
869 struct ib_udata *udata);
870 int (*modify_qp)(struct ib_qp *qp,
871 struct ib_qp_attr *qp_attr,
872 int qp_attr_mask);
873 int (*query_qp)(struct ib_qp *qp,
874 struct ib_qp_attr *qp_attr,
875 int qp_attr_mask,
876 struct ib_qp_init_attr *qp_init_attr);
877 int (*destroy_qp)(struct ib_qp *qp);
878 int (*post_send)(struct ib_qp *qp,
879 struct ib_send_wr *send_wr,
880 struct ib_send_wr **bad_send_wr);
881 int (*post_recv)(struct ib_qp *qp,
882 struct ib_recv_wr *recv_wr,
883 struct ib_recv_wr **bad_recv_wr);
884 struct ib_cq * (*create_cq)(struct ib_device *device, int cqe,
885 struct ib_ucontext *context,
886 struct ib_udata *udata);
887 int (*destroy_cq)(struct ib_cq *cq);
888 int (*resize_cq)(struct ib_cq *cq, int *cqe);
889 int (*poll_cq)(struct ib_cq *cq, int num_entries,
890 struct ib_wc *wc);
891 int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
892 int (*req_notify_cq)(struct ib_cq *cq,
893 enum ib_cq_notify cq_notify);
894 int (*req_ncomp_notif)(struct ib_cq *cq,
895 int wc_cnt);
896 struct ib_mr * (*get_dma_mr)(struct ib_pd *pd,
897 int mr_access_flags);
898 struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd,
899 struct ib_phys_buf *phys_buf_array,
900 int num_phys_buf,
901 int mr_access_flags,
902 u64 *iova_start);
903 struct ib_mr * (*reg_user_mr)(struct ib_pd *pd,
904 struct ib_umem *region,
905 int mr_access_flags,
906 struct ib_udata *udata);
907 int (*query_mr)(struct ib_mr *mr,
908 struct ib_mr_attr *mr_attr);
909 int (*dereg_mr)(struct ib_mr *mr);
910 int (*rereg_phys_mr)(struct ib_mr *mr,
911 int mr_rereg_mask,
912 struct ib_pd *pd,
913 struct ib_phys_buf *phys_buf_array,
914 int num_phys_buf,
915 int mr_access_flags,
916 u64 *iova_start);
917 struct ib_mw * (*alloc_mw)(struct ib_pd *pd);
918 int (*bind_mw)(struct ib_qp *qp,
919 struct ib_mw *mw,
920 struct ib_mw_bind *mw_bind);
921 int (*dealloc_mw)(struct ib_mw *mw);
922 struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd,
923 int mr_access_flags,
924 struct ib_fmr_attr *fmr_attr);
925 int (*map_phys_fmr)(struct ib_fmr *fmr,
926 u64 *page_list, int list_len,
927 u64 iova);
928 int (*unmap_fmr)(struct list_head *fmr_list);
929 int (*dealloc_fmr)(struct ib_fmr *fmr);
930 int (*attach_mcast)(struct ib_qp *qp,
931 union ib_gid *gid,
932 u16 lid);
933 int (*detach_mcast)(struct ib_qp *qp,
934 union ib_gid *gid,
935 u16 lid);
936 int (*process_mad)(struct ib_device *device,
937 int process_mad_flags,
938 u8 port_num,
939 struct ib_wc *in_wc,
940 struct ib_grh *in_grh,
941 struct ib_mad *in_mad,
942 struct ib_mad *out_mad);
943
944 struct module *owner;
945 struct class_device class_dev;
946 struct kobject ports_parent;
947 struct list_head port_list;
948
949 enum {
950 IB_DEV_UNINITIALIZED,
951 IB_DEV_REGISTERED,
952 IB_DEV_UNREGISTERED
953 } reg_state;
954
955 u8 node_type;
956 u8 phys_port_cnt;
957};
958
/*
 * ib_client - registration record for a consumer of IB devices.
 *
 * Registered via ib_register_client().  NOTE(review): the add/remove
 * callbacks presumably fire as devices are registered/unregistered
 * with the core — confirm against ib_register_device() and
 * ib_unregister_device() in the core implementation.
 */
struct ib_client {
	char *name;				/* client name */
	void (*add)   (struct ib_device *);	/* device-added callback */
	void (*remove)(struct ib_device *);	/* device-removed callback */

	struct list_head list;			/* entry on the core's client list */
};
966
/*
 * Allocate/free an ib_device structure.  NOTE(review): @size is
 * presumably at least sizeof(struct ib_device), with any extra space
 * for driver-private data — confirm against callers.
 */
struct ib_device *ib_alloc_device(size_t size);
void ib_dealloc_device(struct ib_device *device);

/* Register/unregister a device with the IB core. */
int ib_register_device (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

/* Register/unregister a client (consumer) of IB devices. */
int ib_register_client (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

/* Get/set per-(device, client) private data. */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data);
979
980static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
981{
982 return copy_from_user(dest, udata->inbuf, len) ? -EFAULT : 0;
983}
984
985static inline int ib_copy_to_udata(struct ib_udata *udata, void *src, size_t len)
986{
987 return copy_to_user(udata->outbuf, src, len) ? -EFAULT : 0;
988}
989
/*
 * Register/unregister a handler for asynchronous events; events are
 * delivered to registered handlers via ib_dispatch_event().
 */
int ib_register_event_handler (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

/* Query device-wide attributes. */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

/* Query the attributes of one port on a device. */
int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

/* Look up an entry in a port's GID table. */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

/* Look up an entry in a port's P_Key table. */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

/* Modify device attributes selected by device_modify_mask. */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

/* Modify port attributes selected by port_modify_mask. */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);
1013
1014/**
1015 * ib_alloc_pd - Allocates an unused protection domain.
1016 * @device: The device on which to allocate the protection domain.
1017 *
1018 * A protection domain object provides an association between QPs, shared
1019 * receive queues, address handles, memory regions, and memory windows.
1020 */
1021struct ib_pd *ib_alloc_pd(struct ib_device *device);
1022
1023/**
1024 * ib_dealloc_pd - Deallocates a protection domain.
1025 * @pd: The protection domain to deallocate.
1026 */
1027int ib_dealloc_pd(struct ib_pd *pd);
1028
1029/**
1030 * ib_create_ah - Creates an address handle for the given address vector.
1031 * @pd: The protection domain associated with the address handle.
1032 * @ah_attr: The attributes of the address vector.
1033 *
1034 * The address handle is used to reference a local or global destination
1035 * in all UD QP post sends.
1036 */
1037struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
1038
1039/**
1040 * ib_create_ah_from_wc - Creates an address handle associated with the
1041 * sender of the specified work completion.
1042 * @pd: The protection domain associated with the address handle.
1043 * @wc: Work completion information associated with a received message.
1044 * @grh: References the received global route header. This parameter is
1045 * ignored unless the work completion indicates that the GRH is valid.
1046 * @port_num: The outbound port number to associate with the address.
1047 *
1048 * The address handle is used to reference a local or global destination
1049 * in all UD QP post sends.
1050 */
1051struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
1052 struct ib_grh *grh, u8 port_num);
1053
1054/**
1055 * ib_modify_ah - Modifies the address vector associated with an address
1056 * handle.
1057 * @ah: The address handle to modify.
1058 * @ah_attr: The new address vector attributes to associate with the
1059 * address handle.
1060 */
1061int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1062
1063/**
1064 * ib_query_ah - Queries the address vector associated with an address
1065 * handle.
1066 * @ah: The address handle to query.
1067 * @ah_attr: The address vector attributes associated with the address
1068 * handle.
1069 */
1070int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
1071
1072/**
1073 * ib_destroy_ah - Destroys an address handle.
1074 * @ah: The address handle to destroy.
1075 */
1076int ib_destroy_ah(struct ib_ah *ah);
1077
1078/**
1079 * ib_create_srq - Creates a SRQ associated with the specified protection
1080 * domain.
1081 * @pd: The protection domain associated with the SRQ.
1082 * @srq_init_attr: A list of initial attributes required to create the SRQ.
1083 *
 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
1085 * requested size of the SRQ, and set to the actual values allocated
1086 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
1087 * will always be at least as large as the requested values.
1088 */
1089struct ib_srq *ib_create_srq(struct ib_pd *pd,
1090 struct ib_srq_init_attr *srq_init_attr);
1091
1092/**
1093 * ib_modify_srq - Modifies the attributes for the specified SRQ.
1094 * @srq: The SRQ to modify.
1095 * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
1096 * the current values of selected SRQ attributes are returned.
1097 * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
1098 * are being modified.
1099 *
1100 * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
1101 * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
1102 * the number of receives queued drops below the limit.
1103 */
1104int ib_modify_srq(struct ib_srq *srq,
1105 struct ib_srq_attr *srq_attr,
1106 enum ib_srq_attr_mask srq_attr_mask);
1107
1108/**
1109 * ib_query_srq - Returns the attribute list and current values for the
1110 * specified SRQ.
1111 * @srq: The SRQ to query.
1112 * @srq_attr: The attributes of the specified SRQ.
1113 */
1114int ib_query_srq(struct ib_srq *srq,
1115 struct ib_srq_attr *srq_attr);
1116
1117/**
1118 * ib_destroy_srq - Destroys the specified SRQ.
1119 * @srq: The SRQ to destroy.
1120 */
1121int ib_destroy_srq(struct ib_srq *srq);
1122
1123/**
1124 * ib_post_srq_recv - Posts a list of work requests to the specified SRQ.
1125 * @srq: The SRQ to post the work request on.
1126 * @recv_wr: A list of work requests to post on the receive queue.
1127 * @bad_recv_wr: On an immediate failure, this parameter will reference
1128 * the work request that failed to be posted on the QP.
1129 */
1130static inline int ib_post_srq_recv(struct ib_srq *srq,
1131 struct ib_recv_wr *recv_wr,
1132 struct ib_recv_wr **bad_recv_wr)
1133{
1134 return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
1135}
1136
1137/**
1138 * ib_create_qp - Creates a QP associated with the specified protection
1139 * domain.
1140 * @pd: The protection domain associated with the QP.
1141 * @qp_init_attr: A list of initial attributes required to create the QP.
1142 */
1143struct ib_qp *ib_create_qp(struct ib_pd *pd,
1144 struct ib_qp_init_attr *qp_init_attr);
1145
1146/**
1147 * ib_modify_qp - Modifies the attributes for the specified QP and then
1148 * transitions the QP to the given state.
1149 * @qp: The QP to modify.
1150 * @qp_attr: On input, specifies the QP attributes to modify. On output,
1151 * the current values of selected QP attributes are returned.
1152 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
1153 * are being modified.
1154 */
1155int ib_modify_qp(struct ib_qp *qp,
1156 struct ib_qp_attr *qp_attr,
1157 int qp_attr_mask);
1158
1159/**
1160 * ib_query_qp - Returns the attribute list and current values for the
1161 * specified QP.
1162 * @qp: The QP to query.
1163 * @qp_attr: The attributes of the specified QP.
1164 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
1165 * @qp_init_attr: Additional attributes of the selected QP.
1166 *
1167 * The qp_attr_mask may be used to limit the query to gathering only the
1168 * selected attributes.
1169 */
1170int ib_query_qp(struct ib_qp *qp,
1171 struct ib_qp_attr *qp_attr,
1172 int qp_attr_mask,
1173 struct ib_qp_init_attr *qp_init_attr);
1174
1175/**
1176 * ib_destroy_qp - Destroys the specified QP.
1177 * @qp: The QP to destroy.
1178 */
1179int ib_destroy_qp(struct ib_qp *qp);
1180
1181/**
1182 * ib_post_send - Posts a list of work requests to the send queue of
1183 * the specified QP.
1184 * @qp: The QP to post the work request on.
1185 * @send_wr: A list of work requests to post on the send queue.
1186 * @bad_send_wr: On an immediate failure, this parameter will reference
1187 * the work request that failed to be posted on the QP.
1188 */
1189static inline int ib_post_send(struct ib_qp *qp,
1190 struct ib_send_wr *send_wr,
1191 struct ib_send_wr **bad_send_wr)
1192{
1193 return qp->device->post_send(qp, send_wr, bad_send_wr);
1194}
1195
1196/**
1197 * ib_post_recv - Posts a list of work requests to the receive queue of
1198 * the specified QP.
1199 * @qp: The QP to post the work request on.
1200 * @recv_wr: A list of work requests to post on the receive queue.
1201 * @bad_recv_wr: On an immediate failure, this parameter will reference
1202 * the work request that failed to be posted on the QP.
1203 */
1204static inline int ib_post_recv(struct ib_qp *qp,
1205 struct ib_recv_wr *recv_wr,
1206 struct ib_recv_wr **bad_recv_wr)
1207{
1208 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1209}
1210
1211/**
1212 * ib_create_cq - Creates a CQ on the specified device.
1213 * @device: The device on which to create the CQ.
1214 * @comp_handler: A user-specified callback that is invoked when a
1215 * completion event occurs on the CQ.
1216 * @event_handler: A user-specified callback that is invoked when an
1217 * asynchronous event not associated with a completion occurs on the CQ.
1218 * @cq_context: Context associated with the CQ returned to the user via
1219 * the associated completion and event handlers.
1220 * @cqe: The minimum size of the CQ.
1221 *
1222 * Users can examine the cq structure to determine the actual CQ size.
1223 */
1224struct ib_cq *ib_create_cq(struct ib_device *device,
1225 ib_comp_handler comp_handler,
1226 void (*event_handler)(struct ib_event *, void *),
1227 void *cq_context, int cqe);
1228
1229/**
1230 * ib_resize_cq - Modifies the capacity of the CQ.
1231 * @cq: The CQ to resize.
1232 * @cqe: The minimum size of the CQ.
1233 *
1234 * Users can examine the cq structure to determine the actual CQ size.
1235 */
1236int ib_resize_cq(struct ib_cq *cq, int cqe);
1237
1238/**
1239 * ib_destroy_cq - Destroys the specified CQ.
1240 * @cq: The CQ to destroy.
1241 */
1242int ib_destroy_cq(struct ib_cq *cq);
1243
1244/**
1245 * ib_poll_cq - poll a CQ for completion(s)
1246 * @cq:the CQ being polled
1247 * @num_entries:maximum number of completions to return
1248 * @wc:array of at least @num_entries &struct ib_wc where completions
1249 * will be returned
1250 *
1251 * Poll a CQ for (possibly multiple) completions. If the return value
1252 * is < 0, an error occurred. If the return value is >= 0, it is the
1253 * number of completions returned. If the return value is
1254 * non-negative and < num_entries, then the CQ was emptied.
1255 */
1256static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1257 struct ib_wc *wc)
1258{
1259 return cq->device->poll_cq(cq, num_entries, wc);
1260}
1261
1262/**
1263 * ib_peek_cq - Returns the number of unreaped completions currently
1264 * on the specified CQ.
1265 * @cq: The CQ to peek.
1266 * @wc_cnt: A minimum number of unreaped completions to check for.
1267 *
1268 * If the number of unreaped completions is greater than or equal to wc_cnt,
1269 * this function returns wc_cnt, otherwise, it returns the actual number of
1270 * unreaped completions.
1271 */
1272int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1273
1274/**
1275 * ib_req_notify_cq - Request completion notification on a CQ.
1276 * @cq: The CQ to generate an event for.
1277 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
1278 * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
1279 * notification will occur on the next completion.
1280 */
1281static inline int ib_req_notify_cq(struct ib_cq *cq,
1282 enum ib_cq_notify cq_notify)
1283{
1284 return cq->device->req_notify_cq(cq, cq_notify);
1285}
1286
1287/**
1288 * ib_req_ncomp_notif - Request completion notification when there are
1289 * at least the specified number of unreaped completions on the CQ.
1290 * @cq: The CQ to generate an event for.
1291 * @wc_cnt: The number of unreaped completions that should be on the
1292 * CQ before an event is generated.
1293 */
1294static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1295{
1296 return cq->device->req_ncomp_notif ?
1297 cq->device->req_ncomp_notif(cq, wc_cnt) :
1298 -ENOSYS;
1299}
1300
1301/**
1302 * ib_get_dma_mr - Returns a memory region for system memory that is
1303 * usable for DMA.
1304 * @pd: The protection domain associated with the memory region.
1305 * @mr_access_flags: Specifies the memory access rights.
1306 */
1307struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1308
1309/**
1310 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1311 * by an HCA.
 * @pd: The protection domain assigned to the registered region.
1313 * @phys_buf_array: Specifies a list of physical buffers to use in the
1314 * memory region.
1315 * @num_phys_buf: Specifies the size of the phys_buf_array.
1316 * @mr_access_flags: Specifies the memory access rights.
1317 * @iova_start: The offset of the region's starting I/O virtual address.
1318 */
1319struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1320 struct ib_phys_buf *phys_buf_array,
1321 int num_phys_buf,
1322 int mr_access_flags,
1323 u64 *iova_start);
1324
1325/**
1326 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
1327 * Conceptually, this call performs the functions deregister memory region
1328 * followed by register physical memory region. Where possible,
1329 * resources are reused instead of deallocated and reallocated.
1330 * @mr: The memory region to modify.
1331 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1332 * properties of the memory region are being modified.
1333 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1334 * the new protection domain to associated with the memory region,
1335 * otherwise, this parameter is ignored.
1336 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1337 * field specifies a list of physical buffers to use in the new
1338 * translation, otherwise, this parameter is ignored.
1339 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1340 * field specifies the size of the phys_buf_array, otherwise, this
1341 * parameter is ignored.
1342 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1343 * field specifies the new memory access rights, otherwise, this
1344 * parameter is ignored.
1345 * @iova_start: The offset of the region's starting I/O virtual address.
1346 */
1347int ib_rereg_phys_mr(struct ib_mr *mr,
1348 int mr_rereg_mask,
1349 struct ib_pd *pd,
1350 struct ib_phys_buf *phys_buf_array,
1351 int num_phys_buf,
1352 int mr_access_flags,
1353 u64 *iova_start);
1354
1355/**
1356 * ib_query_mr - Retrieves information about a specific memory region.
1357 * @mr: The memory region to retrieve information about.
1358 * @mr_attr: The attributes of the specified memory region.
1359 */
1360int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1361
1362/**
1363 * ib_dereg_mr - Deregisters a memory region and removes it from the
1364 * HCA translation table.
1365 * @mr: The memory region to deregister.
1366 */
1367int ib_dereg_mr(struct ib_mr *mr);
1368
1369/**
1370 * ib_alloc_mw - Allocates a memory window.
1371 * @pd: The protection domain associated with the memory window.
1372 */
1373struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1374
1375/**
1376 * ib_bind_mw - Posts a work request to the send queue of the specified
1377 * QP, which binds the memory window to the given address range and
1378 * remote access attributes.
1379 * @qp: QP to post the bind work request on.
1380 * @mw: The memory window to bind.
1381 * @mw_bind: Specifies information about the memory window, including
1382 * its address range, remote access rights, and associated memory region.
1383 */
1384static inline int ib_bind_mw(struct ib_qp *qp,
1385 struct ib_mw *mw,
1386 struct ib_mw_bind *mw_bind)
1387{
1388 /* XXX reference counting in corresponding MR? */
1389 return mw->device->bind_mw ?
1390 mw->device->bind_mw(qp, mw, mw_bind) :
1391 -ENOSYS;
1392}
1393
1394/**
1395 * ib_dealloc_mw - Deallocates a memory window.
1396 * @mw: The memory window to deallocate.
1397 */
1398int ib_dealloc_mw(struct ib_mw *mw);
1399
1400/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
1402 * @pd: The protection domain associated with the unmapped region.
1403 * @mr_access_flags: Specifies the memory access rights.
1404 * @fmr_attr: Attributes of the unmapped region.
1405 *
1406 * A fast memory region must be mapped before it can be used as part of
1407 * a work request.
1408 */
1409struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1410 int mr_access_flags,
1411 struct ib_fmr_attr *fmr_attr);
1412
1413/**
1414 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
1415 * @fmr: The fast memory region to associate with the pages.
1416 * @page_list: An array of physical pages to map to the fast memory region.
1417 * @list_len: The number of pages in page_list.
1418 * @iova: The I/O virtual address to use with the mapped region.
1419 */
1420static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
1421 u64 *page_list, int list_len,
1422 u64 iova)
1423{
1424 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
1425}
1426
1427/**
1428 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
1429 * @fmr_list: A linked list of fast memory regions to unmap.
1430 */
1431int ib_unmap_fmr(struct list_head *fmr_list);
1432
1433/**
1434 * ib_dealloc_fmr - Deallocates a fast memory region.
1435 * @fmr: The fast memory region to deallocate.
1436 */
1437int ib_dealloc_fmr(struct ib_fmr *fmr);
1438
1439/**
1440 * ib_attach_mcast - Attaches the specified QP to a multicast group.
1441 * @qp: QP to attach to the multicast group. The QP must be type
1442 * IB_QPT_UD.
1443 * @gid: Multicast group GID.
1444 * @lid: Multicast group LID in host byte order.
1445 *
1446 * In order to send and receive multicast packets, subnet
1447 * administration must have created the multicast group and configured
1448 * the fabric appropriately. The port associated with the specified
1449 * QP must also be a member of the multicast group.
1450 */
1451int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1452
1453/**
1454 * ib_detach_mcast - Detaches the specified QP from a multicast group.
1455 * @qp: QP to detach from the multicast group.
1456 * @gid: Multicast group GID.
1457 * @lid: Multicast group LID in host byte order.
1458 */
1459int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1460
1461#endif /* IB_VERBS_H */