aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/include
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/infiniband/include
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/infiniband/include')
-rw-r--r--drivers/infiniband/include/ib_cache.h103
-rw-r--r--drivers/infiniband/include/ib_fmr_pool.h92
-rw-r--r--drivers/infiniband/include/ib_mad.h404
-rw-r--r--drivers/infiniband/include/ib_pack.h245
-rw-r--r--drivers/infiniband/include/ib_sa.h308
-rw-r--r--drivers/infiniband/include/ib_smi.h96
-rw-r--r--drivers/infiniband/include/ib_user_mad.h123
-rw-r--r--drivers/infiniband/include/ib_verbs.h1252
8 files changed, 2623 insertions, 0 deletions
diff --git a/drivers/infiniband/include/ib_cache.h b/drivers/infiniband/include/ib_cache.h
new file mode 100644
index 000000000000..44ef6bb9b9df
--- /dev/null
+++ b/drivers/infiniband/include/ib_cache.h
@@ -0,0 +1,103 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_cache.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef _IB_CACHE_H
36#define _IB_CACHE_H
37
38#include <ib_verbs.h>
39
40/**
41 * ib_get_cached_gid - Returns a cached GID table entry
42 * @device: The device to query.
43 * @port_num: The port number of the device to query.
44 * @index: The index into the cached GID table to query.
45 * @gid: The GID value found at the specified index.
46 *
47 * ib_get_cached_gid() fetches the specified GID table entry stored in
48 * the local software cache.
49 */
50int ib_get_cached_gid(struct ib_device *device,
51 u8 port_num,
52 int index,
53 union ib_gid *gid);
54
55/**
56 * ib_find_cached_gid - Returns the port number and GID table index where
57 * a specified GID value occurs.
58 * @device: The device to query.
59 * @gid: The GID value to search for.
60 * @port_num: The port number of the device where the GID value was found.
61 * @index: The index into the cached GID table where the GID was found. This
62 * parameter may be NULL.
63 *
64 * ib_find_cached_gid() searches for the specified GID value in
65 * the local software cache.
66 */
67int ib_find_cached_gid(struct ib_device *device,
68 union ib_gid *gid,
69 u8 *port_num,
70 u16 *index);
71
72/**
73 * ib_get_cached_pkey - Returns a cached PKey table entry
74 * @device: The device to query.
75 * @port_num: The port number of the device to query.
76 * @index: The index into the cached PKey table to query.
77 * @pkey: The PKey value found at the specified index.
78 *
79 * ib_get_cached_pkey() fetches the specified PKey table entry stored in
80 * the local software cache.
81 */
82int ib_get_cached_pkey(struct ib_device *device_handle,
83 u8 port_num,
84 int index,
85 u16 *pkey);
86
87/**
88 * ib_find_cached_pkey - Returns the PKey table index where a specified
89 * PKey value occurs.
90 * @device: The device to query.
91 * @port_num: The port number of the device to search for the PKey.
92 * @pkey: The PKey value to search for.
93 * @index: The index into the cached PKey table where the PKey was found.
94 *
95 * ib_find_cached_pkey() searches the specified PKey table in
96 * the local software cache.
97 */
98int ib_find_cached_pkey(struct ib_device *device,
99 u8 port_num,
100 u16 pkey,
101 u16 *index);
102
103#endif /* _IB_CACHE_H */
diff --git a/drivers/infiniband/include/ib_fmr_pool.h b/drivers/infiniband/include/ib_fmr_pool.h
new file mode 100644
index 000000000000..e8769657cbbb
--- /dev/null
+++ b/drivers/infiniband/include/ib_fmr_pool.h
@@ -0,0 +1,92 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_fmr_pool.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#if !defined(IB_FMR_POOL_H)
36#define IB_FMR_POOL_H
37
38#include <ib_verbs.h>
39
40struct ib_fmr_pool;
41
42/**
43 * struct ib_fmr_pool_param - Parameters for creating FMR pool
44 * @max_pages_per_fmr:Maximum number of pages per map request.
45 * @access:Access flags for FMRs in pool.
46 * @pool_size:Number of FMRs to allocate for pool.
47 * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
48 * FMRs are present.
49 * @flush_function:Callback called when unmapped FMRs are flushed and
50 * more FMRs are possibly available for mapping
51 * @flush_arg:Context passed to user's flush function.
52 * @cache:If set, FMRs may be reused after unmapping for identical map
53 * requests.
54 */
55struct ib_fmr_pool_param {
56 int max_pages_per_fmr;
57 enum ib_access_flags access;
58 int pool_size;
59 int dirty_watermark;
60 void (*flush_function)(struct ib_fmr_pool *pool,
61 void * arg);
62 void *flush_arg;
63 unsigned cache:1;
64};
65
66struct ib_pool_fmr {
67 struct ib_fmr *fmr;
68 struct ib_fmr_pool *pool;
69 struct list_head list;
70 struct hlist_node cache_node;
71 int ref_count;
72 int remap_count;
73 u64 io_virtual_address;
74 int page_list_len;
75 u64 page_list[0];
76};
77
78struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
79 struct ib_fmr_pool_param *params);
80
81int ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
82
83int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
84
85struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
86 u64 *page_list,
87 int list_len,
88 u64 *io_virtual_address);
89
90int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
91
92#endif /* IB_FMR_POOL_H */
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
new file mode 100644
index 000000000000..4a6bf6763a97
--- /dev/null
+++ b/drivers/infiniband/include/ib_mad.h
@@ -0,0 +1,404 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_mad.h 1389 2004-12-27 22:56:47Z roland $
37 */
38
39#if !defined( IB_MAD_H )
40#define IB_MAD_H
41
42#include <ib_verbs.h>
43
44/* Management base version */
45#define IB_MGMT_BASE_VERSION 1
46
47/* Management classes */
48#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01
49#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81
50#define IB_MGMT_CLASS_SUBN_ADM 0x03
51#define IB_MGMT_CLASS_PERF_MGMT 0x04
52#define IB_MGMT_CLASS_BM 0x05
53#define IB_MGMT_CLASS_DEVICE_MGMT 0x06
54#define IB_MGMT_CLASS_CM 0x07
55#define IB_MGMT_CLASS_SNMP 0x08
56#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30
57#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F
58
59/* Management methods */
60#define IB_MGMT_METHOD_GET 0x01
61#define IB_MGMT_METHOD_SET 0x02
62#define IB_MGMT_METHOD_GET_RESP 0x81
63#define IB_MGMT_METHOD_SEND 0x03
64#define IB_MGMT_METHOD_TRAP 0x05
65#define IB_MGMT_METHOD_REPORT 0x06
66#define IB_MGMT_METHOD_REPORT_RESP 0x86
67#define IB_MGMT_METHOD_TRAP_REPRESS 0x07
68
69#define IB_MGMT_METHOD_RESP 0x80
70
71#define IB_MGMT_MAX_METHODS 128
72
73#define IB_QP0 0
74#define IB_QP1 __constant_htonl(1)
75#define IB_QP1_QKEY 0x80010000
76
77struct ib_grh {
78 u32 version_tclass_flow;
79 u16 paylen;
80 u8 next_hdr;
81 u8 hop_limit;
82 union ib_gid sgid;
83 union ib_gid dgid;
84} __attribute__ ((packed));
85
86struct ib_mad_hdr {
87 u8 base_version;
88 u8 mgmt_class;
89 u8 class_version;
90 u8 method;
91 u16 status;
92 u16 class_specific;
93 u64 tid;
94 u16 attr_id;
95 u16 resv;
96 u32 attr_mod;
97} __attribute__ ((packed));
98
99struct ib_rmpp_hdr {
100 u8 rmpp_version;
101 u8 rmpp_type;
102 u8 rmpp_rtime_flags;
103 u8 rmpp_status;
104 u32 seg_num;
105 u32 paylen_newwin;
106} __attribute__ ((packed));
107
108struct ib_mad {
109 struct ib_mad_hdr mad_hdr;
110 u8 data[232];
111} __attribute__ ((packed));
112
113struct ib_rmpp_mad {
114 struct ib_mad_hdr mad_hdr;
115 struct ib_rmpp_hdr rmpp_hdr;
116 u8 data[220];
117} __attribute__ ((packed));
118
119struct ib_vendor_mad {
120 struct ib_mad_hdr mad_hdr;
121 struct ib_rmpp_hdr rmpp_hdr;
122 u8 reserved;
123 u8 oui[3];
124 u8 data[216];
125} __attribute__ ((packed));
126
127struct ib_mad_agent;
128struct ib_mad_send_wc;
129struct ib_mad_recv_wc;
130
131/**
132 * ib_mad_send_handler - callback handler for a sent MAD.
133 * @mad_agent: MAD agent that sent the MAD.
134 * @mad_send_wc: Send work completion information on the sent MAD.
135 */
136typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
137 struct ib_mad_send_wc *mad_send_wc);
138
139/**
140 * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
141 * @mad_agent: MAD agent that snooped the MAD.
142 * @send_wr: Work request information on the sent MAD.
143 * @mad_send_wc: Work completion information on the sent MAD. Valid
144 * only for snooping that occurs on a send completion.
145 *
146 * Clients snooping MADs should not modify data referenced by the @send_wr
147 * or @mad_send_wc.
148 */
149typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
150 struct ib_send_wr *send_wr,
151 struct ib_mad_send_wc *mad_send_wc);
152
153/**
154 * ib_mad_recv_handler - callback handler for a received MAD.
155 * @mad_agent: MAD agent requesting the received MAD.
156 * @mad_recv_wc: Received work completion information on the received MAD.
157 *
158 * MADs received in response to a send request operation will be handed to
159 * the user after the send operation completes. All data buffers given
160 * to registered agents through this routine are owned by the receiving
161 * client, except for snooping agents. Clients snooping MADs should not
162 * modify the data referenced by @mad_recv_wc.
163 */
164typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
165 struct ib_mad_recv_wc *mad_recv_wc);
166
167/**
168 * ib_mad_agent - Used to track MAD registration with the access layer.
169 * @device: Reference to device registration is on.
170 * @qp: Reference to QP used for sending and receiving MADs.
171 * @recv_handler: Callback handler for a received MAD.
172 * @send_handler: Callback handler for a sent MAD.
173 * @snoop_handler: Callback handler for snooped sent MADs.
174 * @context: User-specified context associated with this registration.
175 * @hi_tid: Access layer assigned transaction ID for this client.
176 * Unsolicited MADs sent by this client will have the upper 32-bits
177 * of their TID set to this value.
178 * @port_num: Port number on which QP is registered
179 */
180struct ib_mad_agent {
181 struct ib_device *device;
182 struct ib_qp *qp;
183 ib_mad_recv_handler recv_handler;
184 ib_mad_send_handler send_handler;
185 ib_mad_snoop_handler snoop_handler;
186 void *context;
187 u32 hi_tid;
188 u8 port_num;
189};
190
191/**
192 * ib_mad_send_wc - MAD send completion information.
193 * @wr_id: Work request identifier associated with the send MAD request.
194 * @status: Completion status.
195 * @vendor_err: Optional vendor error information returned with a failed
196 * request.
197 */
198struct ib_mad_send_wc {
199 u64 wr_id;
200 enum ib_wc_status status;
201 u32 vendor_err;
202};
203
204/**
205 * ib_mad_recv_buf - received MAD buffer information.
206 * @list: Reference to next data buffer for a received RMPP MAD.
207 * @grh: References a data buffer containing the global route header.
208 * The data refereced by this buffer is only valid if the GRH is
209 * valid.
210 * @mad: References the start of the received MAD.
211 */
212struct ib_mad_recv_buf {
213 struct list_head list;
214 struct ib_grh *grh;
215 struct ib_mad *mad;
216};
217
218/**
219 * ib_mad_recv_wc - received MAD information.
220 * @wc: Completion information for the received data.
221 * @recv_buf: Specifies the location of the received data buffer(s).
222 * @mad_len: The length of the received MAD, without duplicated headers.
223 *
224 * For received response, the wr_id field of the wc is set to the wr_id
225 * for the corresponding send request.
226 */
227struct ib_mad_recv_wc {
228 struct ib_wc *wc;
229 struct ib_mad_recv_buf recv_buf;
230 int mad_len;
231};
232
233/**
234 * ib_mad_reg_req - MAD registration request
235 * @mgmt_class: Indicates which management class of MADs should be receive
236 * by the caller. This field is only required if the user wishes to
237 * receive unsolicited MADs, otherwise it should be 0.
238 * @mgmt_class_version: Indicates which version of MADs for the given
239 * management class to receive.
240 * @oui: Indicates IEEE OUI when mgmt_class is a vendor class
241 * in the range from 0x30 to 0x4f. Otherwise not used.
242 * @method_mask: The caller will receive unsolicited MADs for any method
243 * where @method_mask = 1.
244 */
245struct ib_mad_reg_req {
246 u8 mgmt_class;
247 u8 mgmt_class_version;
248 u8 oui[3];
249 DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS);
250};
251
252/**
253 * ib_register_mad_agent - Register to send/receive MADs.
254 * @device: The device to register with.
255 * @port_num: The port on the specified device to use.
256 * @qp_type: Specifies which QP to access. Must be either
257 * IB_QPT_SMI or IB_QPT_GSI.
258 * @mad_reg_req: Specifies which unsolicited MADs should be received
259 * by the caller. This parameter may be NULL if the caller only
260 * wishes to receive solicited responses.
261 * @rmpp_version: If set, indicates that the client will send
262 * and receive MADs that contain the RMPP header for the given version.
263 * If set to 0, indicates that RMPP is not used by this client.
264 * @send_handler: The completion callback routine invoked after a send
265 * request has completed.
266 * @recv_handler: The completion callback routine invoked for a received
267 * MAD.
268 * @context: User specified context associated with the registration.
269 */
270struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
271 u8 port_num,
272 enum ib_qp_type qp_type,
273 struct ib_mad_reg_req *mad_reg_req,
274 u8 rmpp_version,
275 ib_mad_send_handler send_handler,
276 ib_mad_recv_handler recv_handler,
277 void *context);
278
279enum ib_mad_snoop_flags {
280 /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
281 /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
282 IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
283 /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
284 IB_MAD_SNOOP_RECVS = (1<<4)
285 /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
286 /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
287};
288
289/**
290 * ib_register_mad_snoop - Register to snoop sent and received MADs.
291 * @device: The device to register with.
292 * @port_num: The port on the specified device to use.
293 * @qp_type: Specifies which QP traffic to snoop. Must be either
294 * IB_QPT_SMI or IB_QPT_GSI.
295 * @mad_snoop_flags: Specifies information where snooping occurs.
296 * @send_handler: The callback routine invoked for a snooped send.
297 * @recv_handler: The callback routine invoked for a snooped receive.
298 * @context: User specified context associated with the registration.
299 */
300struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
301 u8 port_num,
302 enum ib_qp_type qp_type,
303 int mad_snoop_flags,
304 ib_mad_snoop_handler snoop_handler,
305 ib_mad_recv_handler recv_handler,
306 void *context);
307
308/**
309 * ib_unregister_mad_agent - Unregisters a client from using MAD services.
310 * @mad_agent: Corresponding MAD registration request to deregister.
311 *
312 * After invoking this routine, MAD services are no longer usable by the
313 * client on the associated QP.
314 */
315int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
316
317/**
318 * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
319 * with the registered client.
320 * @mad_agent: Specifies the associated registration to post the send to.
321 * @send_wr: Specifies the information needed to send the MAD(s).
322 * @bad_send_wr: Specifies the MAD on which an error was encountered.
323 *
324 * Sent MADs are not guaranteed to complete in the order that they were posted.
325 */
326int ib_post_send_mad(struct ib_mad_agent *mad_agent,
327 struct ib_send_wr *send_wr,
328 struct ib_send_wr **bad_send_wr);
329
330/**
331 * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer.
332 * @mad_recv_wc: Work completion information for a received MAD.
333 * @buf: User-provided data buffer to receive the coalesced buffers. The
334 * referenced buffer should be at least the size of the mad_len specified
335 * by @mad_recv_wc.
336 *
337 * This call copies a chain of received RMPP MADs into a single data buffer,
338 * removing duplicated headers.
339 */
340void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc,
341 void *buf);
342
343/**
344 * ib_free_recv_mad - Returns data buffers used to receive a MAD to the
345 * access layer.
346 * @mad_recv_wc: Work completion information for a received MAD.
347 *
348 * Clients receiving MADs through their ib_mad_recv_handler must call this
349 * routine to return the work completion buffers to the access layer.
350 */
351void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc);
352
353/**
354 * ib_cancel_mad - Cancels an outstanding send MAD operation.
355 * @mad_agent: Specifies the registration associated with sent MAD.
356 * @wr_id: Indicates the work request identifier of the MAD to cancel.
357 *
358 * MADs will be returned to the user through the corresponding
359 * ib_mad_send_handler.
360 */
361void ib_cancel_mad(struct ib_mad_agent *mad_agent,
362 u64 wr_id);
363
364/**
365 * ib_redirect_mad_qp - Registers a QP for MAD services.
366 * @qp: Reference to a QP that requires MAD services.
367 * @rmpp_version: If set, indicates that the client will send
368 * and receive MADs that contain the RMPP header for the given version.
369 * If set to 0, indicates that RMPP is not used by this client.
370 * @send_handler: The completion callback routine invoked after a send
371 * request has completed.
372 * @recv_handler: The completion callback routine invoked for a received
373 * MAD.
374 * @context: User specified context associated with the registration.
375 *
376 * Use of this call allows clients to use MAD services, such as RMPP,
377 * on user-owned QPs. After calling this routine, users may send
378 * MADs on the specified QP by calling ib_mad_post_send.
379 */
380struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
381 u8 rmpp_version,
382 ib_mad_send_handler send_handler,
383 ib_mad_recv_handler recv_handler,
384 void *context);
385
386/**
387 * ib_process_mad_wc - Processes a work completion associated with a
388 * MAD sent or received on a redirected QP.
389 * @mad_agent: Specifies the registered MAD service using the redirected QP.
390 * @wc: References a work completion associated with a sent or received
391 * MAD segment.
392 *
393 * This routine is used to complete or continue processing on a MAD request.
394 * If the work completion is associated with a send operation, calling
395 * this routine is required to continue an RMPP transfer or to wait for a
396 * corresponding response, if it is a request. If the work completion is
397 * associated with a receive operation, calling this routine is required to
398 * process an inbound or outbound RMPP transfer, or to match a response MAD
399 * with its corresponding request.
400 */
401int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
402 struct ib_wc *wc);
403
404#endif /* IB_MAD_H */
diff --git a/drivers/infiniband/include/ib_pack.h b/drivers/infiniband/include/ib_pack.h
new file mode 100644
index 000000000000..fe480f3e8654
--- /dev/null
+++ b/drivers/infiniband/include/ib_pack.h
@@ -0,0 +1,245 @@
1/*
2 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_pack.h 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#ifndef IB_PACK_H
36#define IB_PACK_H
37
38#include <ib_verbs.h>
39
40enum {
41 IB_LRH_BYTES = 8,
42 IB_GRH_BYTES = 40,
43 IB_BTH_BYTES = 12,
44 IB_DETH_BYTES = 8
45};
46
47struct ib_field {
48 size_t struct_offset_bytes;
49 size_t struct_size_bytes;
50 int offset_words;
51 int offset_bits;
52 int size_bits;
53 char *field_name;
54};
55
56#define RESERVED \
57 .field_name = "reserved"
58
59/*
60 * This macro cleans up the definitions of constants for BTH opcodes.
61 * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY,
62 * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives
63 * the correct value.
64 *
65 * In short, user code should use the constants defined using the
66 * macro rather than worrying about adding together other constants.
67*/
68#define IB_OPCODE(transport, op) \
69 IB_OPCODE_ ## transport ## _ ## op = \
70 IB_OPCODE_ ## transport + IB_OPCODE_ ## op
71
72enum {
73 /* transport types -- just used to define real constants */
74 IB_OPCODE_RC = 0x00,
75 IB_OPCODE_UC = 0x20,
76 IB_OPCODE_RD = 0x40,
77 IB_OPCODE_UD = 0x60,
78
79 /* operations -- just used to define real constants */
80 IB_OPCODE_SEND_FIRST = 0x00,
81 IB_OPCODE_SEND_MIDDLE = 0x01,
82 IB_OPCODE_SEND_LAST = 0x02,
83 IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03,
84 IB_OPCODE_SEND_ONLY = 0x04,
85 IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05,
86 IB_OPCODE_RDMA_WRITE_FIRST = 0x06,
87 IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07,
88 IB_OPCODE_RDMA_WRITE_LAST = 0x08,
89 IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09,
90 IB_OPCODE_RDMA_WRITE_ONLY = 0x0a,
91 IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b,
92 IB_OPCODE_RDMA_READ_REQUEST = 0x0c,
93 IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d,
94 IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e,
95 IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f,
96 IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10,
97 IB_OPCODE_ACKNOWLEDGE = 0x11,
98 IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12,
99 IB_OPCODE_COMPARE_SWAP = 0x13,
100 IB_OPCODE_FETCH_ADD = 0x14,
101
102 /* real constants follow -- see comment about above IB_OPCODE()
103 macro for more details */
104
105 /* RC */
106 IB_OPCODE(RC, SEND_FIRST),
107 IB_OPCODE(RC, SEND_MIDDLE),
108 IB_OPCODE(RC, SEND_LAST),
109 IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE),
110 IB_OPCODE(RC, SEND_ONLY),
111 IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE),
112 IB_OPCODE(RC, RDMA_WRITE_FIRST),
113 IB_OPCODE(RC, RDMA_WRITE_MIDDLE),
114 IB_OPCODE(RC, RDMA_WRITE_LAST),
115 IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
116 IB_OPCODE(RC, RDMA_WRITE_ONLY),
117 IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
118 IB_OPCODE(RC, RDMA_READ_REQUEST),
119 IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST),
120 IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE),
121 IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST),
122 IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY),
123 IB_OPCODE(RC, ACKNOWLEDGE),
124 IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE),
125 IB_OPCODE(RC, COMPARE_SWAP),
126 IB_OPCODE(RC, FETCH_ADD),
127
128 /* UC */
129 IB_OPCODE(UC, SEND_FIRST),
130 IB_OPCODE(UC, SEND_MIDDLE),
131 IB_OPCODE(UC, SEND_LAST),
132 IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE),
133 IB_OPCODE(UC, SEND_ONLY),
134 IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE),
135 IB_OPCODE(UC, RDMA_WRITE_FIRST),
136 IB_OPCODE(UC, RDMA_WRITE_MIDDLE),
137 IB_OPCODE(UC, RDMA_WRITE_LAST),
138 IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE),
139 IB_OPCODE(UC, RDMA_WRITE_ONLY),
140 IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
141
142 /* RD */
143 IB_OPCODE(RD, SEND_FIRST),
144 IB_OPCODE(RD, SEND_MIDDLE),
145 IB_OPCODE(RD, SEND_LAST),
146 IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE),
147 IB_OPCODE(RD, SEND_ONLY),
148 IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE),
149 IB_OPCODE(RD, RDMA_WRITE_FIRST),
150 IB_OPCODE(RD, RDMA_WRITE_MIDDLE),
151 IB_OPCODE(RD, RDMA_WRITE_LAST),
152 IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE),
153 IB_OPCODE(RD, RDMA_WRITE_ONLY),
154 IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE),
155 IB_OPCODE(RD, RDMA_READ_REQUEST),
156 IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST),
157 IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE),
158 IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST),
159 IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY),
160 IB_OPCODE(RD, ACKNOWLEDGE),
161 IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE),
162 IB_OPCODE(RD, COMPARE_SWAP),
163 IB_OPCODE(RD, FETCH_ADD),
164
165 /* UD */
166 IB_OPCODE(UD, SEND_ONLY),
167 IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE)
168};
169
170enum {
171 IB_LNH_RAW = 0,
172 IB_LNH_IP = 1,
173 IB_LNH_IBA_LOCAL = 2,
174 IB_LNH_IBA_GLOBAL = 3
175};
176
177struct ib_unpacked_lrh {
178 u8 virtual_lane;
179 u8 link_version;
180 u8 service_level;
181 u8 link_next_header;
182 __be16 destination_lid;
183 __be16 packet_length;
184 __be16 source_lid;
185};
186
187struct ib_unpacked_grh {
188 u8 ip_version;
189 u8 traffic_class;
190 __be32 flow_label;
191 __be16 payload_length;
192 u8 next_header;
193 u8 hop_limit;
194 union ib_gid source_gid;
195 union ib_gid destination_gid;
196};
197
198struct ib_unpacked_bth {
199 u8 opcode;
200 u8 solicited_event;
201 u8 mig_req;
202 u8 pad_count;
203 u8 transport_header_version;
204 __be16 pkey;
205 __be32 destination_qpn;
206 u8 ack_req;
207 __be32 psn;
208};
209
210struct ib_unpacked_deth {
211 __be32 qkey;
212 __be32 source_qpn;
213};
214
215struct ib_ud_header {
216 struct ib_unpacked_lrh lrh;
217 int grh_present;
218 struct ib_unpacked_grh grh;
219 struct ib_unpacked_bth bth;
220 struct ib_unpacked_deth deth;
221 int immediate_present;
222 __be32 immediate_data;
223};
224
225void ib_pack(const struct ib_field *desc,
226 int desc_len,
227 void *structure,
228 void *buf);
229
230void ib_unpack(const struct ib_field *desc,
231 int desc_len,
232 void *buf,
233 void *structure);
234
235void ib_ud_header_init(int payload_bytes,
236 int grh_present,
237 struct ib_ud_header *header);
238
239int ib_ud_header_pack(struct ib_ud_header *header,
240 void *buf);
241
242int ib_ud_header_unpack(void *buf,
243 struct ib_ud_header *header);
244
245#endif /* IB_PACK_H */
diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h
new file mode 100644
index 000000000000..f4f747707b30
--- /dev/null
+++ b/drivers/infiniband/include/ib_sa.h
@@ -0,0 +1,308 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_sa.h 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#ifndef IB_SA_H
36#define IB_SA_H
37
38#include <linux/compiler.h>
39
40#include <ib_verbs.h>
41#include <ib_mad.h>
42
enum {
        IB_SA_CLASS_VERSION     = 2,    /* IB spec version 1.1/1.2 */

        /*
         * SA-specific MAD method (beyond the common IB_MGMT_* methods):
         * deletes a record, e.g. to leave a multicast group -- see
         * ib_sa_mcmember_rec_delete() below.
         */
        IB_SA_METHOD_DELETE     = 0x15
};
48
/*
 * How the SA should interpret an attribute value in a query; used by
 * the *_selector fields of the record structures below.
 */
enum ib_sa_selector {
        IB_SA_GTE  = 0,   /* greater than or equal to the given value */
        IB_SA_LTE  = 1,   /* less than or equal to the given value */
        IB_SA_EQ   = 2,   /* exactly equal to the given value */
        /*
         * The meaning of "best" depends on the attribute: for
         * example, for MTU best will return the largest available
         * MTU, while for packet life time, best will return the
         * smallest available life time.
         */
        IB_SA_BEST = 3
};
61
/*
 * IBA static-rate encodings as carried in SA records.  Note that the
 * encoding is not monotonic in bandwidth (10 Gb/sec is 3 while
 * 5 Gb/sec is 5), so conversion goes through ib_sa_rate_enum_to_int().
 */
enum ib_sa_rate {
        IB_SA_RATE_2_5_GBPS = 2,
        IB_SA_RATE_5_GBPS   = 5,
        IB_SA_RATE_10_GBPS  = 3,
        IB_SA_RATE_20_GBPS  = 6,
        IB_SA_RATE_30_GBPS  = 4,
        IB_SA_RATE_40_GBPS  = 7,
        IB_SA_RATE_60_GBPS  = 8,
        IB_SA_RATE_80_GBPS  = 9,
        IB_SA_RATE_120_GBPS = 10
};

/*
 * Convert an SA rate encoding to the bandwidth multiplier it
 * represents, in units of 2.5 Gb/sec (one 1X lane): 2.5 Gb/sec -> 1,
 * 10 Gb/sec -> 4, 120 Gb/sec -> 48, and so on.  Returns -1 for any
 * value that is not a defined ib_sa_rate encoding.
 */
static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
{
        /*
         * The defined encodings form the contiguous range 2..10;
         * mult[i] is the multiplier for encoding i + 2.
         */
        static const int mult[] = { 1, 4, 12, 2, 8, 16, 24, 32, 48 };

        if (rate < IB_SA_RATE_2_5_GBPS || rate > IB_SA_RATE_120_GBPS)
                return -1;
        return mult[rate - IB_SA_RATE_2_5_GBPS];
}
89
90typedef u64 __bitwise ib_sa_comp_mask;
91
92#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
93
94/*
95 * Structures for SA records are named "struct ib_sa_xxx_rec." No
96 * attempt is made to pack structures to match the physical layout of
97 * SA records in SA MADs; all packing and unpacking is handled by the
98 * SA query code.
99 *
100 * For a record with structure ib_sa_xxx_rec, the naming convention
101 * for the component mask value for field yyy is IB_SA_XXX_REC_YYY (we
102 * never use different abbreviations or otherwise change the spelling
103 * of xxx/yyy between ib_sa_xxx_rec.yyy and IB_SA_XXX_REC_YYY).
104 *
105 * Reserved rows are indicated with comments to help maintainability.
106 */
107
108/* reserved: 0 */
109/* reserved: 1 */
110#define IB_SA_PATH_REC_DGID IB_SA_COMP_MASK( 2)
111#define IB_SA_PATH_REC_SGID IB_SA_COMP_MASK( 3)
112#define IB_SA_PATH_REC_DLID IB_SA_COMP_MASK( 4)
113#define IB_SA_PATH_REC_SLID IB_SA_COMP_MASK( 5)
114#define IB_SA_PATH_REC_RAW_TRAFFIC IB_SA_COMP_MASK( 6)
115/* reserved: 7 */
116#define IB_SA_PATH_REC_FLOW_LABEL IB_SA_COMP_MASK( 8)
117#define IB_SA_PATH_REC_HOP_LIMIT IB_SA_COMP_MASK( 9)
118#define IB_SA_PATH_REC_TRAFFIC_CLASS IB_SA_COMP_MASK(10)
119#define IB_SA_PATH_REC_REVERSIBLE IB_SA_COMP_MASK(11)
120#define IB_SA_PATH_REC_NUMB_PATH IB_SA_COMP_MASK(12)
121#define IB_SA_PATH_REC_PKEY IB_SA_COMP_MASK(13)
122/* reserved: 14 */
123#define IB_SA_PATH_REC_SL IB_SA_COMP_MASK(15)
124#define IB_SA_PATH_REC_MTU_SELECTOR IB_SA_COMP_MASK(16)
125#define IB_SA_PATH_REC_MTU IB_SA_COMP_MASK(17)
126#define IB_SA_PATH_REC_RATE_SELECTOR IB_SA_COMP_MASK(18)
127#define IB_SA_PATH_REC_RATE IB_SA_COMP_MASK(19)
128#define IB_SA_PATH_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(20)
129#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21)
130#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22)
131
132struct ib_sa_path_rec {
133 /* reserved */
134 /* reserved */
135 union ib_gid dgid;
136 union ib_gid sgid;
137 u16 dlid;
138 u16 slid;
139 int raw_traffic;
140 /* reserved */
141 u32 flow_label;
142 u8 hop_limit;
143 u8 traffic_class;
144 int reversible;
145 u8 numb_path;
146 u16 pkey;
147 /* reserved */
148 u8 sl;
149 u8 mtu_selector;
150 enum ib_mtu mtu;
151 u8 rate_selector;
152 u8 rate;
153 u8 packet_life_time_selector;
154 u8 packet_life_time;
155 u8 preference;
156};
157
158#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
159#define IB_SA_MCMEMBER_REC_PORT_GID IB_SA_COMP_MASK( 1)
160#define IB_SA_MCMEMBER_REC_QKEY IB_SA_COMP_MASK( 2)
161#define IB_SA_MCMEMBER_REC_MLID IB_SA_COMP_MASK( 3)
162#define IB_SA_MCMEMBER_REC_MTU_SELECTOR IB_SA_COMP_MASK( 4)
163#define IB_SA_MCMEMBER_REC_MTU IB_SA_COMP_MASK( 5)
164#define IB_SA_MCMEMBER_REC_TRAFFIC_CLASS IB_SA_COMP_MASK( 6)
165#define IB_SA_MCMEMBER_REC_PKEY IB_SA_COMP_MASK( 7)
166#define IB_SA_MCMEMBER_REC_RATE_SELECTOR IB_SA_COMP_MASK( 8)
167#define IB_SA_MCMEMBER_REC_RATE IB_SA_COMP_MASK( 9)
168#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME_SELECTOR IB_SA_COMP_MASK(10)
169#define IB_SA_MCMEMBER_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(11)
170#define IB_SA_MCMEMBER_REC_SL IB_SA_COMP_MASK(12)
171#define IB_SA_MCMEMBER_REC_FLOW_LABEL IB_SA_COMP_MASK(13)
172#define IB_SA_MCMEMBER_REC_HOP_LIMIT IB_SA_COMP_MASK(14)
173#define IB_SA_MCMEMBER_REC_SCOPE IB_SA_COMP_MASK(15)
174#define IB_SA_MCMEMBER_REC_JOIN_STATE IB_SA_COMP_MASK(16)
175#define IB_SA_MCMEMBER_REC_PROXY_JOIN IB_SA_COMP_MASK(17)
176
177struct ib_sa_mcmember_rec {
178 union ib_gid mgid;
179 union ib_gid port_gid;
180 u32 qkey;
181 u16 mlid;
182 u8 mtu_selector;
183 enum ib_mtu mtu;
184 u8 traffic_class;
185 u16 pkey;
186 u8 rate_selector;
187 u8 rate;
188 u8 packet_life_time_selector;
189 u8 packet_life_time;
190 u8 sl;
191 u32 flow_label;
192 u8 hop_limit;
193 u8 scope;
194 u8 join_state;
195 int proxy_join;
196};
197
/* Opaque per-query context, defined by the SA query implementation. */
struct ib_sa_query;

/**
 * ib_sa_cancel_query - Cancel an outstanding SA query
 * @id:query ID returned when the query was started
 * @query:query context returned when the query was started
 *
 * The canceled query's callback is still invoked, with status -EINTR
 * (see the callback documentation for ib_sa_mcmember_rec_set()).
 */
void ib_sa_cancel_query(int id, struct ib_sa_query *query);

/**
 * ib_sa_path_rec_get - Start a Path Record (GET) query
 * @device:device to send query on
 * @port_num:port number to send query on
 * @rec:Path Record to send in the query
 * @comp_mask:component mask selecting which fields of @rec are valid
 * @timeout_ms:time to wait for a response
 * @gfp_mask:GFP mask for internal allocations
 * @callback:called when the query completes, times out or is canceled;
 *   @resp is only valid when status is 0
 * @context:opaque user context passed to @callback
 * @query:query context, used to cancel the query
 *
 * Returns a negative error code on failure; otherwise a query ID that
 * can be passed to ib_sa_cancel_query().
 */
int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
                       struct ib_sa_path_rec *rec,
                       ib_sa_comp_mask comp_mask,
                       int timeout_ms, int gfp_mask,
                       void (*callback)(int status,
                                        struct ib_sa_path_rec *resp,
                                        void *context),
                       void *context,
                       struct ib_sa_query **query);

/**
 * ib_sa_mcmember_rec_query - Start an MCMember Record query
 * @device:device to send query on
 * @port_num:port number to send query on
 * @method:MAD method to use (e.g. IB_MGMT_METHOD_SET or
 *   IB_SA_METHOD_DELETE -- see the ib_sa_mcmember_rec_set()/_delete()
 *   wrappers below)
 * @rec:MCMember Record to send in the query
 * @comp_mask:component mask selecting which fields of @rec are valid
 * @timeout_ms:time to wait for a response
 * @gfp_mask:GFP mask for internal allocations
 * @callback:called when the query completes, times out or is canceled;
 *   @resp is only valid when status is 0
 * @context:opaque user context passed to @callback
 * @query:query context, used to cancel the query
 *
 * Returns a negative error code on failure; otherwise a query ID that
 * can be passed to ib_sa_cancel_query().
 */
int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
                             u8 method,
                             struct ib_sa_mcmember_rec *rec,
                             ib_sa_comp_mask comp_mask,
                             int timeout_ms, int gfp_mask,
                             void (*callback)(int status,
                                              struct ib_sa_mcmember_rec *resp,
                                              void *context),
                             void *context,
                             struct ib_sa_query **query);
222
223/**
224 * ib_sa_mcmember_rec_set - Start an MCMember set query
225 * @device:device to send query on
226 * @port_num: port number to send query on
227 * @rec:MCMember Record to send in query
228 * @comp_mask:component mask to send in query
229 * @timeout_ms:time to wait for response
230 * @gfp_mask:GFP mask to use for internal allocations
231 * @callback:function called when query completes, times out or is
232 * canceled
233 * @context:opaque user context passed to callback
234 * @sa_query:query context, used to cancel query
235 *
236 * Send an MCMember Set query to the SA (eg to join a multicast
237 * group). The callback function will be called when the query
238 * completes (or fails); status is 0 for a successful response, -EINTR
239 * if the query is canceled, -ETIMEDOUT is the query timed out, or
240 * -EIO if an error occurred sending the query. The resp parameter of
241 * the callback is only valid if status is 0.
242 *
243 * If the return value of ib_sa_mcmember_rec_set() is negative, it is
244 * an error code. Otherwise it is a query ID that can be used to
245 * cancel the query.
246 */
247static inline int
248ib_sa_mcmember_rec_set(struct ib_device *device, u8 port_num,
249 struct ib_sa_mcmember_rec *rec,
250 ib_sa_comp_mask comp_mask,
251 int timeout_ms, int gfp_mask,
252 void (*callback)(int status,
253 struct ib_sa_mcmember_rec *resp,
254 void *context),
255 void *context,
256 struct ib_sa_query **query)
257{
258 return ib_sa_mcmember_rec_query(device, port_num,
259 IB_MGMT_METHOD_SET,
260 rec, comp_mask,
261 timeout_ms, gfp_mask, callback,
262 context, query);
263}
264
265/**
266 * ib_sa_mcmember_rec_delete - Start an MCMember delete query
267 * @device:device to send query on
268 * @port_num: port number to send query on
269 * @rec:MCMember Record to send in query
270 * @comp_mask:component mask to send in query
271 * @timeout_ms:time to wait for response
272 * @gfp_mask:GFP mask to use for internal allocations
273 * @callback:function called when query completes, times out or is
274 * canceled
275 * @context:opaque user context passed to callback
276 * @sa_query:query context, used to cancel query
277 *
278 * Send an MCMember Delete query to the SA (eg to leave a multicast
279 * group). The callback function will be called when the query
280 * completes (or fails); status is 0 for a successful response, -EINTR
281 * if the query is canceled, -ETIMEDOUT is the query timed out, or
282 * -EIO if an error occurred sending the query. The resp parameter of
283 * the callback is only valid if status is 0.
284 *
285 * If the return value of ib_sa_mcmember_rec_delete() is negative, it
286 * is an error code. Otherwise it is a query ID that can be used to
287 * cancel the query.
288 */
289static inline int
290ib_sa_mcmember_rec_delete(struct ib_device *device, u8 port_num,
291 struct ib_sa_mcmember_rec *rec,
292 ib_sa_comp_mask comp_mask,
293 int timeout_ms, int gfp_mask,
294 void (*callback)(int status,
295 struct ib_sa_mcmember_rec *resp,
296 void *context),
297 void *context,
298 struct ib_sa_query **query)
299{
300 return ib_sa_mcmember_rec_query(device, port_num,
301 IB_SA_METHOD_DELETE,
302 rec, comp_mask,
303 timeout_ms, gfp_mask, callback,
304 context, query);
305}
306
307
308#endif /* IB_SA_H */
diff --git a/drivers/infiniband/include/ib_smi.h b/drivers/infiniband/include/ib_smi.h
new file mode 100644
index 000000000000..ca8216514963
--- /dev/null
+++ b/drivers/infiniband/include/ib_smi.h
@@ -0,0 +1,96 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_smi.h 1389 2004-12-27 22:56:47Z roland $
37 */
38
39#if !defined( IB_SMI_H )
40#define IB_SMI_H
41
42#include <ib_mad.h>
43
44#define IB_LID_PERMISSIVE 0xFFFF
45
46#define IB_SMP_DATA_SIZE 64
47#define IB_SMP_MAX_PATH_HOPS 64
48
/*
 * Subnet Management Packet (SMP) -- the MAD format used by the SMI.
 * The layout mirrors the wire format, hence the packed attribute.
 * The initial_path/return_path vectors and hop_ptr/hop_cnt/dr_*
 * fields are used by directed-route SMPs.
 *
 * NOTE(review): multi-byte fields are declared as plain u16/u32/u64
 * but carry big-endian data on the wire (IB_SMP_DIRECTION below is
 * an htons() constant compared directly against "status") -- confirm
 * byte-order handling at each use site.
 */
struct ib_smp {
        u8      base_version;
        u8      mgmt_class;
        u8      class_version;
        u8      method;
        u16     status;                 /* includes the IB_SMP_DIRECTION bit */
        u8      hop_ptr;                /* directed route: current hop pointer */
        u8      hop_cnt;                /* directed route: total hop count */
        u64     tid;                    /* transaction ID */
        u16     attr_id;                /* one of IB_SMP_ATTR_* below */
        u16     resv;
        u32     attr_mod;
        u64     mkey;
        u16     dr_slid;                /* directed route source LID */
        u16     dr_dlid;                /* directed route destination LID */
        u8      reserved[28];
        u8      data[IB_SMP_DATA_SIZE];
        u8      initial_path[IB_SMP_MAX_PATH_HOPS];
        u8      return_path[IB_SMP_MAX_PATH_HOPS];
} __attribute__ ((packed));
69
70#define IB_SMP_DIRECTION __constant_htons(0x8000)
71
72/* Subnet management attributes */
73#define IB_SMP_ATTR_NOTICE __constant_htons(0x0002)
74#define IB_SMP_ATTR_NODE_DESC __constant_htons(0x0010)
75#define IB_SMP_ATTR_NODE_INFO __constant_htons(0x0011)
76#define IB_SMP_ATTR_SWITCH_INFO __constant_htons(0x0012)
77#define IB_SMP_ATTR_GUID_INFO __constant_htons(0x0014)
78#define IB_SMP_ATTR_PORT_INFO __constant_htons(0x0015)
79#define IB_SMP_ATTR_PKEY_TABLE __constant_htons(0x0016)
80#define IB_SMP_ATTR_SL_TO_VL_TABLE __constant_htons(0x0017)
81#define IB_SMP_ATTR_VL_ARB_TABLE __constant_htons(0x0018)
82#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE __constant_htons(0x0019)
83#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE __constant_htons(0x001A)
84#define IB_SMP_ATTR_MCAST_FORWARD_TABLE __constant_htons(0x001B)
85#define IB_SMP_ATTR_SM_INFO __constant_htons(0x0020)
86#define IB_SMP_ATTR_VENDOR_DIAG __constant_htons(0x0030)
87#define IB_SMP_ATTR_LED_INFO __constant_htons(0x0031)
88#define IB_SMP_ATTR_VENDOR_MASK __constant_htons(0xFF00)
89
90static inline u8
91ib_get_smp_direction(struct ib_smp *smp)
92{
93 return ((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION);
94}
95
96#endif /* IB_SMI_H */
diff --git a/drivers/infiniband/include/ib_user_mad.h b/drivers/infiniband/include/ib_user_mad.h
new file mode 100644
index 000000000000..06ad4a6075fa
--- /dev/null
+++ b/drivers/infiniband/include/ib_user_mad.h
@@ -0,0 +1,123 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ib_user_mad.h 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#ifndef IB_USER_MAD_H
36#define IB_USER_MAD_H
37
38#include <linux/types.h>
39#include <linux/ioctl.h>
40
41/*
42 * Increment this value if any changes that break userspace ABI
43 * compatibility are made.
44 */
45#define IB_USER_MAD_ABI_VERSION 2
46
47/*
48 * Make sure that all structs defined in this file remain laid out so
49 * that they pack the same way on 32-bit and 64-bit architectures (to
50 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
51 */
52
53/**
54 * ib_user_mad - MAD packet
55 * @data - Contents of MAD
56 * @id - ID of agent MAD received with/to be sent with
57 * @status - 0 on successful receive, ETIMEDOUT if no response
58 * received (transaction ID in data[] will be set to TID of original
59 * request) (ignored on send)
60 * @timeout_ms - Milliseconds to wait for response (unset on receive)
61 * @qpn - Remote QP number received from/to be sent to
62 * @qkey - Remote Q_Key to be sent with (unset on receive)
63 * @lid - Remote lid received from/to be sent to
64 * @sl - Service level received with/to be sent with
65 * @path_bits - Local path bits received with/to be sent with
66 * @grh_present - If set, GRH was received/should be sent
67 * @gid_index - Local GID index to send with (unset on receive)
68 * @hop_limit - Hop limit in GRH
69 * @traffic_class - Traffic class in GRH
70 * @gid - Remote GID in GRH
71 * @flow_label - Flow label in GRH
72 *
73 * All multi-byte quantities are stored in network (big endian) byte order.
74 */
75struct ib_user_mad {
76 __u8 data[256];
77 __u32 id;
78 __u32 status;
79 __u32 timeout_ms;
80 __u32 qpn;
81 __u32 qkey;
82 __u16 lid;
83 __u8 sl;
84 __u8 path_bits;
85 __u8 grh_present;
86 __u8 gid_index;
87 __u8 hop_limit;
88 __u8 traffic_class;
89 __u8 gid[16];
90 __u32 flow_label;
91};
92
/**
 * ib_user_mad_reg_req - MAD registration request
 * @id - Set by the kernel; used to identify agent in future requests.
 * @qpn - Queue pair number; must be 0 or 1.
 * @method_mask - The caller will receive unsolicited MADs for any method
 *   where @method_mask = 1.
 * @mgmt_class - Indicates which management class of MADs should be
 *   received by the caller.  This field is only required if the user
 *   wishes to receive unsolicited MADs, otherwise it should be 0.
 * @mgmt_class_version - Indicates which version of MADs for the given
 *   management class to receive.
 * @oui - Indicates IEEE OUI when mgmt_class is a vendor class
 *   in the range from 0x30 to 0x4f.  Otherwise not used.
 *
 * Part of the userspace ABI: the layout must pack identically on
 * 32-bit and 64-bit architectures (see the note near the top of this
 * file), so only fixed-width __uNN types are used.
 */
struct ib_user_mad_reg_req {
        __u32   id;
        __u32   method_mask[4];
        __u8    qpn;
        __u8    mgmt_class;
        __u8    mgmt_class_version;
        __u8    oui[3];
};
115
116#define IB_IOCTL_MAGIC 0x1b
117
118#define IB_USER_MAD_REGISTER_AGENT _IOWR(IB_IOCTL_MAGIC, 1, \
119 struct ib_user_mad_reg_req)
120
121#define IB_USER_MAD_UNREGISTER_AGENT _IOW(IB_IOCTL_MAGIC, 2, __u32)
122
123#endif /* IB_USER_MAD_H */
diff --git a/drivers/infiniband/include/ib_verbs.h b/drivers/infiniband/include/ib_verbs.h
new file mode 100644
index 000000000000..cf01f044a223
--- /dev/null
+++ b/drivers/infiniband/include/ib_verbs.h
@@ -0,0 +1,1252 @@
1/*
2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 *
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
13 *
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
16 * conditions are met:
17 *
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer.
21 *
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
26 *
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
34 * SOFTWARE.
35 *
36 * $Id: ib_verbs.h 1349 2004-12-16 21:09:43Z roland $
37 */
38
39#if !defined(IB_VERBS_H)
40#define IB_VERBS_H
41
42#include <linux/types.h>
43#include <linux/device.h>
44#include <asm/atomic.h>
45
46union ib_gid {
47 u8 raw[16];
48 struct {
49 u64 subnet_prefix;
50 u64 interface_id;
51 } global;
52};
53
54enum ib_node_type {
55 IB_NODE_CA = 1,
56 IB_NODE_SWITCH,
57 IB_NODE_ROUTER
58};
59
60enum ib_device_cap_flags {
61 IB_DEVICE_RESIZE_MAX_WR = 1,
62 IB_DEVICE_BAD_PKEY_CNTR = (1<<1),
63 IB_DEVICE_BAD_QKEY_CNTR = (1<<2),
64 IB_DEVICE_RAW_MULTI = (1<<3),
65 IB_DEVICE_AUTO_PATH_MIG = (1<<4),
66 IB_DEVICE_CHANGE_PHY_PORT = (1<<5),
67 IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6),
68 IB_DEVICE_CURR_QP_STATE_MOD = (1<<7),
69 IB_DEVICE_SHUTDOWN_PORT = (1<<8),
70 IB_DEVICE_INIT_TYPE = (1<<9),
71 IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10),
72 IB_DEVICE_SYS_IMAGE_GUID = (1<<11),
73 IB_DEVICE_RC_RNR_NAK_GEN = (1<<12),
74 IB_DEVICE_SRQ_RESIZE = (1<<13),
75 IB_DEVICE_N_NOTIFY_CQ = (1<<14),
76};
77
78enum ib_atomic_cap {
79 IB_ATOMIC_NONE,
80 IB_ATOMIC_HCA,
81 IB_ATOMIC_GLOB
82};
83
84struct ib_device_attr {
85 u64 fw_ver;
86 u64 node_guid;
87 u64 sys_image_guid;
88 u64 max_mr_size;
89 u64 page_size_cap;
90 u32 vendor_id;
91 u32 vendor_part_id;
92 u32 hw_ver;
93 int max_qp;
94 int max_qp_wr;
95 int device_cap_flags;
96 int max_sge;
97 int max_sge_rd;
98 int max_cq;
99 int max_cqe;
100 int max_mr;
101 int max_pd;
102 int max_qp_rd_atom;
103 int max_ee_rd_atom;
104 int max_res_rd_atom;
105 int max_qp_init_rd_atom;
106 int max_ee_init_rd_atom;
107 enum ib_atomic_cap atomic_cap;
108 int max_ee;
109 int max_rdd;
110 int max_mw;
111 int max_raw_ipv6_qp;
112 int max_raw_ethy_qp;
113 int max_mcast_grp;
114 int max_mcast_qp_attach;
115 int max_total_mcast_qp_attach;
116 int max_ah;
117 int max_fmr;
118 int max_map_per_fmr;
119 int max_srq;
120 int max_srq_wr;
121 int max_srq_sge;
122 u16 max_pkeys;
123 u8 local_ca_ack_delay;
124};
125
/* IBA MTU encodings (verbs/MAD representation, not bytes). */
enum ib_mtu {
        IB_MTU_256  = 1,
        IB_MTU_512  = 2,
        IB_MTU_1024 = 3,
        IB_MTU_2048 = 4,
        IB_MTU_4096 = 5
};

/*
 * Convert an ib_mtu encoding to the MTU in bytes, or -1 if the value
 * is not a defined encoding.  The defined encodings 1..5 map to
 * consecutive powers of two: encoding n means 128 << n bytes.
 */
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
        if (mtu < IB_MTU_256 || mtu > IB_MTU_4096)
                return -1;
        return 128 << mtu;
}
145
146enum ib_port_state {
147 IB_PORT_NOP = 0,
148 IB_PORT_DOWN = 1,
149 IB_PORT_INIT = 2,
150 IB_PORT_ARMED = 3,
151 IB_PORT_ACTIVE = 4,
152 IB_PORT_ACTIVE_DEFER = 5
153};
154
155enum ib_port_cap_flags {
156 IB_PORT_SM = 1 << 1,
157 IB_PORT_NOTICE_SUP = 1 << 2,
158 IB_PORT_TRAP_SUP = 1 << 3,
159 IB_PORT_OPT_IPD_SUP = 1 << 4,
160 IB_PORT_AUTO_MIGR_SUP = 1 << 5,
161 IB_PORT_SL_MAP_SUP = 1 << 6,
162 IB_PORT_MKEY_NVRAM = 1 << 7,
163 IB_PORT_PKEY_NVRAM = 1 << 8,
164 IB_PORT_LED_INFO_SUP = 1 << 9,
165 IB_PORT_SM_DISABLED = 1 << 10,
166 IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
167 IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
168 IB_PORT_CM_SUP = 1 << 16,
169 IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
170 IB_PORT_REINIT_SUP = 1 << 18,
171 IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
172 IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
173 IB_PORT_DR_NOTICE_SUP = 1 << 21,
174 IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
175 IB_PORT_BOOT_MGMT_SUP = 1 << 23,
176 IB_PORT_LINK_LATENCY_SUP = 1 << 24,
177 IB_PORT_CLIENT_REG_SUP = 1 << 25
178};
179
/*
 * Port link-width encodings.  The values are single bits so they can
 * also be used in width bitmasks (e.g. supported-width fields).
 */
enum ib_port_width {
        IB_WIDTH_1X  = 1,
        IB_WIDTH_4X  = 2,
        IB_WIDTH_8X  = 4,
        IB_WIDTH_12X = 8
};

/*
 * Convert a single width encoding to the number of lanes it
 * represents; -1 for anything that is not one defined encoding.
 */
static inline int ib_width_enum_to_int(enum ib_port_width width)
{
        if (width == IB_WIDTH_1X)
                return 1;
        if (width == IB_WIDTH_4X)
                return 4;
        if (width == IB_WIDTH_8X)
                return 8;
        return width == IB_WIDTH_12X ? 12 : -1;
}
197
198struct ib_port_attr {
199 enum ib_port_state state;
200 enum ib_mtu max_mtu;
201 enum ib_mtu active_mtu;
202 int gid_tbl_len;
203 u32 port_cap_flags;
204 u32 max_msg_sz;
205 u32 bad_pkey_cntr;
206 u32 qkey_viol_cntr;
207 u16 pkey_tbl_len;
208 u16 lid;
209 u16 sm_lid;
210 u8 lmc;
211 u8 max_vl_num;
212 u8 sm_sl;
213 u8 subnet_timeout;
214 u8 init_type_reply;
215 u8 active_width;
216 u8 active_speed;
217 u8 phys_state;
218};
219
220enum ib_device_modify_flags {
221 IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1
222};
223
224struct ib_device_modify {
225 u64 sys_image_guid;
226};
227
228enum ib_port_modify_flags {
229 IB_PORT_SHUTDOWN = 1,
230 IB_PORT_INIT_TYPE = (1<<2),
231 IB_PORT_RESET_QKEY_CNTR = (1<<3)
232};
233
234struct ib_port_modify {
235 u32 set_port_cap_mask;
236 u32 clr_port_cap_mask;
237 u8 init_type;
238};
239
240enum ib_event_type {
241 IB_EVENT_CQ_ERR,
242 IB_EVENT_QP_FATAL,
243 IB_EVENT_QP_REQ_ERR,
244 IB_EVENT_QP_ACCESS_ERR,
245 IB_EVENT_COMM_EST,
246 IB_EVENT_SQ_DRAINED,
247 IB_EVENT_PATH_MIG,
248 IB_EVENT_PATH_MIG_ERR,
249 IB_EVENT_DEVICE_FATAL,
250 IB_EVENT_PORT_ACTIVE,
251 IB_EVENT_PORT_ERR,
252 IB_EVENT_LID_CHANGE,
253 IB_EVENT_PKEY_CHANGE,
254 IB_EVENT_SM_CHANGE
255};
256
257struct ib_event {
258 struct ib_device *device;
259 union {
260 struct ib_cq *cq;
261 struct ib_qp *qp;
262 u8 port_num;
263 } element;
264 enum ib_event_type event;
265};
266
267struct ib_event_handler {
268 struct ib_device *device;
269 void (*handler)(struct ib_event_handler *, struct ib_event *);
270 struct list_head list;
271};
272
/*
 * Initialize an ib_event_handler before registering it: record the
 * device and callback and reset the list linkage.  Wrapped in
 * do { } while (0) so it behaves as a single statement.
 */
#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler)          \
        do {                                                    \
                (_ptr)->device  = _device;                      \
                (_ptr)->handler = _handler;                     \
                INIT_LIST_HEAD(&(_ptr)->list);                  \
        } while (0)
279
280struct ib_global_route {
281 union ib_gid dgid;
282 u32 flow_label;
283 u8 sgid_index;
284 u8 hop_limit;
285 u8 traffic_class;
286};
287
288enum {
289 IB_MULTICAST_QPN = 0xffffff
290};
291
292enum ib_ah_flags {
293 IB_AH_GRH = 1
294};
295
296struct ib_ah_attr {
297 struct ib_global_route grh;
298 u16 dlid;
299 u8 sl;
300 u8 src_path_bits;
301 u8 static_rate;
302 u8 ah_flags;
303 u8 port_num;
304};
305
306enum ib_wc_status {
307 IB_WC_SUCCESS,
308 IB_WC_LOC_LEN_ERR,
309 IB_WC_LOC_QP_OP_ERR,
310 IB_WC_LOC_EEC_OP_ERR,
311 IB_WC_LOC_PROT_ERR,
312 IB_WC_WR_FLUSH_ERR,
313 IB_WC_MW_BIND_ERR,
314 IB_WC_BAD_RESP_ERR,
315 IB_WC_LOC_ACCESS_ERR,
316 IB_WC_REM_INV_REQ_ERR,
317 IB_WC_REM_ACCESS_ERR,
318 IB_WC_REM_OP_ERR,
319 IB_WC_RETRY_EXC_ERR,
320 IB_WC_RNR_RETRY_EXC_ERR,
321 IB_WC_LOC_RDD_VIOL_ERR,
322 IB_WC_REM_INV_RD_REQ_ERR,
323 IB_WC_REM_ABORT_ERR,
324 IB_WC_INV_EECN_ERR,
325 IB_WC_INV_EEC_STATE_ERR,
326 IB_WC_FATAL_ERR,
327 IB_WC_RESP_TIMEOUT_ERR,
328 IB_WC_GENERAL_ERR
329};
330
331enum ib_wc_opcode {
332 IB_WC_SEND,
333 IB_WC_RDMA_WRITE,
334 IB_WC_RDMA_READ,
335 IB_WC_COMP_SWAP,
336 IB_WC_FETCH_ADD,
337 IB_WC_BIND_MW,
338/*
339 * Set value of IB_WC_RECV so consumers can test if a completion is a
340 * receive by testing (opcode & IB_WC_RECV).
341 */
342 IB_WC_RECV = 1 << 7,
343 IB_WC_RECV_RDMA_WITH_IMM
344};
345
346enum ib_wc_flags {
347 IB_WC_GRH = 1,
348 IB_WC_WITH_IMM = (1<<1)
349};
350
351struct ib_wc {
352 u64 wr_id;
353 enum ib_wc_status status;
354 enum ib_wc_opcode opcode;
355 u32 vendor_err;
356 u32 byte_len;
357 __be32 imm_data;
358 u32 qp_num;
359 u32 src_qp;
360 int wc_flags;
361 u16 pkey_index;
362 u16 slid;
363 u8 sl;
364 u8 dlid_path_bits;
365 u8 port_num; /* valid only for DR SMPs on switches */
366};
367
368enum ib_cq_notify {
369 IB_CQ_SOLICITED,
370 IB_CQ_NEXT_COMP
371};
372
373struct ib_qp_cap {
374 u32 max_send_wr;
375 u32 max_recv_wr;
376 u32 max_send_sge;
377 u32 max_recv_sge;
378 u32 max_inline_data;
379};
380
381enum ib_sig_type {
382 IB_SIGNAL_ALL_WR,
383 IB_SIGNAL_REQ_WR
384};
385
386enum ib_qp_type {
387 /*
388 * IB_QPT_SMI and IB_QPT_GSI have to be the first two entries
389 * here (and in that order) since the MAD layer uses them as
390 * indices into a 2-entry table.
391 */
392 IB_QPT_SMI,
393 IB_QPT_GSI,
394
395 IB_QPT_RC,
396 IB_QPT_UC,
397 IB_QPT_UD,
398 IB_QPT_RAW_IPV6,
399 IB_QPT_RAW_ETY
400};
401
402struct ib_qp_init_attr {
403 void (*event_handler)(struct ib_event *, void *);
404 void *qp_context;
405 struct ib_cq *send_cq;
406 struct ib_cq *recv_cq;
407 struct ib_srq *srq;
408 struct ib_qp_cap cap;
409 enum ib_sig_type sq_sig_type;
410 enum ib_qp_type qp_type;
411 u8 port_num; /* special QP types only */
412};
413
414enum ib_rnr_timeout {
415 IB_RNR_TIMER_655_36 = 0,
416 IB_RNR_TIMER_000_01 = 1,
417 IB_RNR_TIMER_000_02 = 2,
418 IB_RNR_TIMER_000_03 = 3,
419 IB_RNR_TIMER_000_04 = 4,
420 IB_RNR_TIMER_000_06 = 5,
421 IB_RNR_TIMER_000_08 = 6,
422 IB_RNR_TIMER_000_12 = 7,
423 IB_RNR_TIMER_000_16 = 8,
424 IB_RNR_TIMER_000_24 = 9,
425 IB_RNR_TIMER_000_32 = 10,
426 IB_RNR_TIMER_000_48 = 11,
427 IB_RNR_TIMER_000_64 = 12,
428 IB_RNR_TIMER_000_96 = 13,
429 IB_RNR_TIMER_001_28 = 14,
430 IB_RNR_TIMER_001_92 = 15,
431 IB_RNR_TIMER_002_56 = 16,
432 IB_RNR_TIMER_003_84 = 17,
433 IB_RNR_TIMER_005_12 = 18,
434 IB_RNR_TIMER_007_68 = 19,
435 IB_RNR_TIMER_010_24 = 20,
436 IB_RNR_TIMER_015_36 = 21,
437 IB_RNR_TIMER_020_48 = 22,
438 IB_RNR_TIMER_030_72 = 23,
439 IB_RNR_TIMER_040_96 = 24,
440 IB_RNR_TIMER_061_44 = 25,
441 IB_RNR_TIMER_081_92 = 26,
442 IB_RNR_TIMER_122_88 = 27,
443 IB_RNR_TIMER_163_84 = 28,
444 IB_RNR_TIMER_245_76 = 29,
445 IB_RNR_TIMER_327_68 = 30,
446 IB_RNR_TIMER_491_52 = 31
447};
448
449enum ib_qp_attr_mask {
450 IB_QP_STATE = 1,
451 IB_QP_CUR_STATE = (1<<1),
452 IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
453 IB_QP_ACCESS_FLAGS = (1<<3),
454 IB_QP_PKEY_INDEX = (1<<4),
455 IB_QP_PORT = (1<<5),
456 IB_QP_QKEY = (1<<6),
457 IB_QP_AV = (1<<7),
458 IB_QP_PATH_MTU = (1<<8),
459 IB_QP_TIMEOUT = (1<<9),
460 IB_QP_RETRY_CNT = (1<<10),
461 IB_QP_RNR_RETRY = (1<<11),
462 IB_QP_RQ_PSN = (1<<12),
463 IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
464 IB_QP_ALT_PATH = (1<<14),
465 IB_QP_MIN_RNR_TIMER = (1<<15),
466 IB_QP_SQ_PSN = (1<<16),
467 IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
468 IB_QP_PATH_MIG_STATE = (1<<18),
469 IB_QP_CAP = (1<<19),
470 IB_QP_DEST_QPN = (1<<20)
471};
472
473enum ib_qp_state {
474 IB_QPS_RESET,
475 IB_QPS_INIT,
476 IB_QPS_RTR,
477 IB_QPS_RTS,
478 IB_QPS_SQD,
479 IB_QPS_SQE,
480 IB_QPS_ERR
481};
482
483enum ib_mig_state {
484 IB_MIG_MIGRATED,
485 IB_MIG_REARM,
486 IB_MIG_ARMED
487};
488
489struct ib_qp_attr {
490 enum ib_qp_state qp_state;
491 enum ib_qp_state cur_qp_state;
492 enum ib_mtu path_mtu;
493 enum ib_mig_state path_mig_state;
494 u32 qkey;
495 u32 rq_psn;
496 u32 sq_psn;
497 u32 dest_qp_num;
498 int qp_access_flags;
499 struct ib_qp_cap cap;
500 struct ib_ah_attr ah_attr;
501 struct ib_ah_attr alt_ah_attr;
502 u16 pkey_index;
503 u16 alt_pkey_index;
504 u8 en_sqd_async_notify;
505 u8 sq_draining;
506 u8 max_rd_atomic;
507 u8 max_dest_rd_atomic;
508 u8 min_rnr_timer;
509 u8 port_num;
510 u8 timeout;
511 u8 retry_cnt;
512 u8 rnr_retry;
513 u8 alt_port_num;
514 u8 alt_timeout;
515};
516
/* Operation codes for send work requests (struct ib_send_wr.opcode). */
enum ib_wr_opcode {
	IB_WR_RDMA_WRITE,
	IB_WR_RDMA_WRITE_WITH_IMM,
	IB_WR_SEND,
	IB_WR_SEND_WITH_IMM,
	IB_WR_RDMA_READ,
	IB_WR_ATOMIC_CMP_AND_SWP,
	IB_WR_ATOMIC_FETCH_AND_ADD
};

/* Bit flags for struct ib_send_wr.send_flags. */
enum ib_send_flags {
	IB_SEND_FENCE		= 1,
	IB_SEND_SIGNALED	= (1<<1),
	IB_SEND_SOLICITED	= (1<<2),
	IB_SEND_INLINE		= (1<<3)
};
533
/* A scatter/gather element: a range of registered memory named by its lkey. */
struct ib_sge {
	u64	addr;
	u32	length;
	u32	lkey;
};

/*
 * A send work request.  Requests chained through @next are posted as a
 * list with ib_post_send().  Which member of the @wr union is valid
 * depends on the QP type and @opcode.
 */
struct ib_send_wr {
	struct ib_send_wr      *next;
	u64			wr_id;		/* user-defined cookie */
	struct ib_sge	       *sg_list;
	int			num_sge;
	enum ib_wr_opcode	opcode;
	int			send_flags;	/* enum ib_send_flags bits */
	u32			imm_data;	/* for IB_WR_*_WITH_IMM opcodes */
	union {
		struct {
			u64	remote_addr;
			u32	rkey;
		} rdma;
		struct {
			u64	remote_addr;
			u64	compare_add;
			u64	swap;
			u32	rkey;
		} atomic;
		struct {
			struct ib_ah *ah;
			struct ib_mad_hdr *mad_hdr;
			u32	remote_qpn;
			u32	remote_qkey;
			int	timeout_ms;	/* valid for MADs only */
			u16	pkey_index;	/* valid for GSI only */
			u8	port_num;	/* valid for DR SMPs on switch only */
		} ud;
	} wr;
};

/* A receive work request, posted as a chain with ib_post_recv(). */
struct ib_recv_wr {
	struct ib_recv_wr      *next;
	u64			wr_id;		/* user-defined cookie */
	struct ib_sge	       *sg_list;
	int			num_sge;
};
577
/* Memory access rights for memory regions and memory windows. */
enum ib_access_flags {
	IB_ACCESS_LOCAL_WRITE	= 1,
	IB_ACCESS_REMOTE_WRITE	= (1<<1),
	IB_ACCESS_REMOTE_READ	= (1<<2),
	IB_ACCESS_REMOTE_ATOMIC	= (1<<3),
	IB_ACCESS_MW_BIND	= (1<<4)
};

/* One physically contiguous buffer, as used by ib_reg_phys_mr(). */
struct ib_phys_buf {
	u64	addr;
	u64	size;
};

/* Memory region attributes, returned by ib_query_mr(). */
struct ib_mr_attr {
	struct ib_pd	*pd;
	u64		device_virt_addr;
	u64		size;
	int		mr_access_flags;	/* enum ib_access_flags bits */
	u32		lkey;
	u32		rkey;
};

/* Selects which MR properties ib_rereg_phys_mr() modifies. */
enum ib_mr_rereg_flags {
	IB_MR_REREG_TRANS	= 1,
	IB_MR_REREG_PD		= (1<<1),
	IB_MR_REREG_ACCESS	= (1<<2)
};

/* Parameters describing a memory window binding (see ib_bind_mw()). */
struct ib_mw_bind {
	struct ib_mr   *mr;
	u64		wr_id;		/* user-defined cookie */
	u64		addr;
	u32		length;
	int		send_flags;	/* enum ib_send_flags bits */
	int		mw_access_flags; /* enum ib_access_flags bits */
};

/* Limits of a fast memory region, passed to ib_alloc_fmr(). */
struct ib_fmr_attr {
	int	max_pages;
	int	max_maps;
	u8	page_size;	/* NOTE(review): presumably log2 of the page size — confirm with drivers */
};
620
/* A protection domain. */
struct ib_pd {
	struct ib_device       *device;
	atomic_t		usecnt; /* count all resources */
};

/* An address handle, created from an ib_ah_attr by ib_create_ah(). */
struct ib_ah {
	struct ib_device	*device;
	struct ib_pd		*pd;
};

/* Callback invoked when a completion event occurs on a CQ. */
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);

/* A completion queue. */
struct ib_cq {
	struct ib_device       *device;
	ib_comp_handler		comp_handler;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *cq_context;	/* passed back to both handlers */
	int			cqe;		/* actual number of entries */
	atomic_t		usecnt; /* count number of work queues */
};

/* A shared receive queue. */
struct ib_srq {
	struct ib_device	*device;
	struct ib_pd		*pd;
	void			*srq_context;
	atomic_t		usecnt;
};

/* A queue pair. */
struct ib_qp {
	struct ib_device       *device;
	struct ib_pd	       *pd;
	struct ib_cq	       *send_cq;
	struct ib_cq	       *recv_cq;
	struct ib_srq	       *srq;
	void                  (*event_handler)(struct ib_event *, void *);
	void		       *qp_context;
	u32			qp_num;
	enum ib_qp_type		qp_type;
};

/* A memory region; lkey/rkey name it in local/remote work requests. */
struct ib_mr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	u32			lkey;
	u32			rkey;
	atomic_t		usecnt; /* count number of MWs */
};

/* A memory window, bound to a range of an MR via ib_bind_mw(). */
struct ib_mw {
	struct ib_device	*device;
	struct ib_pd		*pd;
	u32			rkey;
};

/* A fast memory region; @list links it onto unmap lists (ib_unmap_fmr()). */
struct ib_fmr {
	struct ib_device	*device;
	struct ib_pd		*pd;
	struct list_head	list;
	u32			lkey;
	u32			rkey;
};
682
struct ib_mad;
struct ib_grh;

/* Flags passed to the process_mad device method. */
enum ib_process_mad_flags {
	IB_MAD_IGNORE_MKEY	= 1,
	IB_MAD_IGNORE_BKEY	= 2,
	IB_MAD_IGNORE_ALL	= IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY
};

/* Bit flags returned by the process_mad device method. */
enum ib_mad_result {
	IB_MAD_RESULT_FAILURE  = 0,      /* (!SUCCESS is the important flag) */
	IB_MAD_RESULT_SUCCESS  = 1 << 0, /* MAD was successfully processed */
	IB_MAD_RESULT_REPLY    = 1 << 1, /* Reply packet needs to be sent */
	IB_MAD_RESULT_CONSUMED = 1 << 2  /* Packet consumed: stop processing */
};

#define IB_DEVICE_NAME_MAX 64

/* Per-device P_Key and GID table cache (see ib_cache.h). */
struct ib_cache {
	rwlock_t                lock;
	struct ib_event_handler event_handler;
	struct ib_pkey_cache  **pkey_cache;
	struct ib_gid_cache   **gid_cache;
};
707
/*
 * Per-HCA device structure.  The function pointers are the driver's
 * verbs entry points; the ib_* helpers declared below dispatch through
 * them.  Entry points checked for NULL before use (bind_mw,
 * req_ncomp_notif) are optional for drivers to implement.
 */
struct ib_device {
	struct device                *dma_device;

	char                          name[IB_DEVICE_NAME_MAX];

	/* Asynchronous event dispatch (see ib_register_event_handler()). */
	struct list_head              event_handler_list;
	spinlock_t                    event_handler_lock;

	/* Core bookkeeping and per-client private data. */
	struct list_head              core_list;
	struct list_head              client_data_list;
	spinlock_t                    client_data_lock;

	struct ib_cache               cache;

	u32                           flags;

	/* Device and port query/modify entry points. */
	int		           (*query_device)(struct ib_device *device,
						   struct ib_device_attr *device_attr);
	int		           (*query_port)(struct ib_device *device,
						 u8 port_num,
						 struct ib_port_attr *port_attr);
	int		           (*query_gid)(struct ib_device *device,
						u8 port_num, int index,
						union ib_gid *gid);
	int		           (*query_pkey)(struct ib_device *device,
						 u8 port_num, u16 index, u16 *pkey);
	int		           (*modify_device)(struct ib_device *device,
						    int device_modify_mask,
						    struct ib_device_modify *device_modify);
	int		           (*modify_port)(struct ib_device *device,
						  u8 port_num, int port_modify_mask,
						  struct ib_port_modify *port_modify);
	/* Protection domains and address handles. */
	struct ib_pd *             (*alloc_pd)(struct ib_device *device);
	int                        (*dealloc_pd)(struct ib_pd *pd);
	struct ib_ah *             (*create_ah)(struct ib_pd *pd,
						struct ib_ah_attr *ah_attr);
	int                        (*modify_ah)(struct ib_ah *ah,
						struct ib_ah_attr *ah_attr);
	int                        (*query_ah)(struct ib_ah *ah,
					       struct ib_ah_attr *ah_attr);
	int                        (*destroy_ah)(struct ib_ah *ah);
	/* Queue pairs and work request posting. */
	struct ib_qp *             (*create_qp)(struct ib_pd *pd,
						struct ib_qp_init_attr *qp_init_attr);
	int                        (*modify_qp)(struct ib_qp *qp,
						struct ib_qp_attr *qp_attr,
						int qp_attr_mask);
	int                        (*query_qp)(struct ib_qp *qp,
					       struct ib_qp_attr *qp_attr,
					       int qp_attr_mask,
					       struct ib_qp_init_attr *qp_init_attr);
	int                        (*destroy_qp)(struct ib_qp *qp);
	int                        (*post_send)(struct ib_qp *qp,
						struct ib_send_wr *send_wr,
						struct ib_send_wr **bad_send_wr);
	int                        (*post_recv)(struct ib_qp *qp,
						struct ib_recv_wr *recv_wr,
						struct ib_recv_wr **bad_recv_wr);
	/* Completion queues. */
	struct ib_cq *             (*create_cq)(struct ib_device *device,
						int cqe);
	int                        (*destroy_cq)(struct ib_cq *cq);
	int                        (*resize_cq)(struct ib_cq *cq, int *cqe);
	int                        (*poll_cq)(struct ib_cq *cq, int num_entries,
					      struct ib_wc *wc);
	int                        (*peek_cq)(struct ib_cq *cq, int wc_cnt);
	int                        (*req_notify_cq)(struct ib_cq *cq,
						    enum ib_cq_notify cq_notify);
	int                        (*req_ncomp_notif)(struct ib_cq *cq,
						      int wc_cnt);
	/* Memory regions, windows, and fast memory regions. */
	struct ib_mr *             (*get_dma_mr)(struct ib_pd *pd,
						 int mr_access_flags);
	struct ib_mr *             (*reg_phys_mr)(struct ib_pd *pd,
						  struct ib_phys_buf *phys_buf_array,
						  int num_phys_buf,
						  int mr_access_flags,
						  u64 *iova_start);
	int                        (*query_mr)(struct ib_mr *mr,
					       struct ib_mr_attr *mr_attr);
	int                        (*dereg_mr)(struct ib_mr *mr);
	int                        (*rereg_phys_mr)(struct ib_mr *mr,
						    int mr_rereg_mask,
						    struct ib_pd *pd,
						    struct ib_phys_buf *phys_buf_array,
						    int num_phys_buf,
						    int mr_access_flags,
						    u64 *iova_start);
	struct ib_mw *             (*alloc_mw)(struct ib_pd *pd);
	int                        (*bind_mw)(struct ib_qp *qp,
					      struct ib_mw *mw,
					      struct ib_mw_bind *mw_bind);
	int                        (*dealloc_mw)(struct ib_mw *mw);
	struct ib_fmr *	           (*alloc_fmr)(struct ib_pd *pd,
						int mr_access_flags,
						struct ib_fmr_attr *fmr_attr);
	int		           (*map_phys_fmr)(struct ib_fmr *fmr,
						   u64 *page_list, int list_len,
						   u64 iova);
	int		           (*unmap_fmr)(struct list_head *fmr_list);
	int		           (*dealloc_fmr)(struct ib_fmr *fmr);
	/* Multicast group membership. */
	int                        (*attach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	int                        (*detach_mcast)(struct ib_qp *qp,
						   union ib_gid *gid,
						   u16 lid);
	/* MAD processing hook; returns enum ib_mad_result bits. */
	int                        (*process_mad)(struct ib_device *device,
						  int process_mad_flags,
						  u8 port_num,
						  struct ib_wc *in_wc,
						  struct ib_grh *in_grh,
						  struct ib_mad *in_mad,
						  struct ib_mad *out_mad);

	/* sysfs representation. */
	struct class_device          class_dev;
	struct kobject               ports_parent;
	struct list_head             port_list;

	enum {
		IB_DEV_UNINITIALIZED,
		IB_DEV_REGISTERED,
		IB_DEV_UNREGISTERED
	}                            reg_state;

	u8                           node_type;
	u8                           phys_port_cnt;
};
833
/*
 * A consumer of IB devices, registered with ib_register_client().
 * NOTE(review): presumably @add/@remove are invoked as devices come and
 * go — confirm against the registration code in the core.
 */
struct ib_client {
	char  *name;
	void (*add)   (struct ib_device *);
	void (*remove)(struct ib_device *);

	struct list_head list;	/* link on the core's client list */
};
841
/* Allocate an ib_device; @size is the total number of bytes to allocate. */
struct ib_device *ib_alloc_device(size_t size);
/* Free a device obtained from ib_alloc_device(). */
void ib_dealloc_device(struct ib_device *device);

/* Register a device with the IB core so clients can use it. */
int ib_register_device   (struct ib_device *device);
void ib_unregister_device(struct ib_device *device);

/* Register a client that wants to be told about IB devices. */
int ib_register_client   (struct ib_client *client);
void ib_unregister_client(struct ib_client *client);

/* Per-(device, client) private data accessors. */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client);
void  ib_set_client_data(struct ib_device *device, struct ib_client *client,
			 void *data);

/* Asynchronous event handler registration and dispatch. */
int ib_register_event_handler  (struct ib_event_handler *event_handler);
int ib_unregister_event_handler(struct ib_event_handler *event_handler);
void ib_dispatch_event(struct ib_event *event);

/* Query device-wide attributes. */
int ib_query_device(struct ib_device *device,
		    struct ib_device_attr *device_attr);

/* Query the attributes of one port. */
int ib_query_port(struct ib_device *device,
		  u8 port_num, struct ib_port_attr *port_attr);

/* Read one entry of a port's GID table. */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid);

/* Read one entry of a port's P_Key table. */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey);

/* Modify device attributes selected by @device_modify_mask. */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify);

/* Modify port attributes selected by @port_modify_mask. */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify);
878
879/**
880 * ib_alloc_pd - Allocates an unused protection domain.
881 * @device: The device on which to allocate the protection domain.
882 *
883 * A protection domain object provides an association between QPs, shared
884 * receive queues, address handles, memory regions, and memory windows.
885 */
886struct ib_pd *ib_alloc_pd(struct ib_device *device);
887
888/**
889 * ib_dealloc_pd - Deallocates a protection domain.
890 * @pd: The protection domain to deallocate.
891 */
892int ib_dealloc_pd(struct ib_pd *pd);
893
894/**
895 * ib_create_ah - Creates an address handle for the given address vector.
896 * @pd: The protection domain associated with the address handle.
897 * @ah_attr: The attributes of the address vector.
898 *
899 * The address handle is used to reference a local or global destination
900 * in all UD QP post sends.
901 */
902struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
903
904/**
905 * ib_modify_ah - Modifies the address vector associated with an address
906 * handle.
907 * @ah: The address handle to modify.
908 * @ah_attr: The new address vector attributes to associate with the
909 * address handle.
910 */
911int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
912
913/**
914 * ib_query_ah - Queries the address vector associated with an address
915 * handle.
916 * @ah: The address handle to query.
917 * @ah_attr: The address vector attributes associated with the address
918 * handle.
919 */
920int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
921
922/**
923 * ib_destroy_ah - Destroys an address handle.
924 * @ah: The address handle to destroy.
925 */
926int ib_destroy_ah(struct ib_ah *ah);
927
928/**
929 * ib_create_qp - Creates a QP associated with the specified protection
930 * domain.
931 * @pd: The protection domain associated with the QP.
932 * @qp_init_attr: A list of initial attributes required to create the QP.
933 */
934struct ib_qp *ib_create_qp(struct ib_pd *pd,
935 struct ib_qp_init_attr *qp_init_attr);
936
937/**
938 * ib_modify_qp - Modifies the attributes for the specified QP and then
939 * transitions the QP to the given state.
940 * @qp: The QP to modify.
941 * @qp_attr: On input, specifies the QP attributes to modify. On output,
942 * the current values of selected QP attributes are returned.
943 * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
944 * are being modified.
945 */
946int ib_modify_qp(struct ib_qp *qp,
947 struct ib_qp_attr *qp_attr,
948 int qp_attr_mask);
949
950/**
951 * ib_query_qp - Returns the attribute list and current values for the
952 * specified QP.
953 * @qp: The QP to query.
954 * @qp_attr: The attributes of the specified QP.
955 * @qp_attr_mask: A bit-mask used to select specific attributes to query.
956 * @qp_init_attr: Additional attributes of the selected QP.
957 *
958 * The qp_attr_mask may be used to limit the query to gathering only the
959 * selected attributes.
960 */
961int ib_query_qp(struct ib_qp *qp,
962 struct ib_qp_attr *qp_attr,
963 int qp_attr_mask,
964 struct ib_qp_init_attr *qp_init_attr);
965
966/**
967 * ib_destroy_qp - Destroys the specified QP.
968 * @qp: The QP to destroy.
969 */
970int ib_destroy_qp(struct ib_qp *qp);
971
972/**
973 * ib_post_send - Posts a list of work requests to the send queue of
974 * the specified QP.
975 * @qp: The QP to post the work request on.
976 * @send_wr: A list of work requests to post on the send queue.
977 * @bad_send_wr: On an immediate failure, this parameter will reference
978 * the work request that failed to be posted on the QP.
979 */
980static inline int ib_post_send(struct ib_qp *qp,
981 struct ib_send_wr *send_wr,
982 struct ib_send_wr **bad_send_wr)
983{
984 return qp->device->post_send(qp, send_wr, bad_send_wr);
985}
986
987/**
988 * ib_post_recv - Posts a list of work requests to the receive queue of
989 * the specified QP.
990 * @qp: The QP to post the work request on.
991 * @recv_wr: A list of work requests to post on the receive queue.
992 * @bad_recv_wr: On an immediate failure, this parameter will reference
993 * the work request that failed to be posted on the QP.
994 */
995static inline int ib_post_recv(struct ib_qp *qp,
996 struct ib_recv_wr *recv_wr,
997 struct ib_recv_wr **bad_recv_wr)
998{
999 return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
1000}
1001
1002/**
1003 * ib_create_cq - Creates a CQ on the specified device.
1004 * @device: The device on which to create the CQ.
1005 * @comp_handler: A user-specified callback that is invoked when a
1006 * completion event occurs on the CQ.
1007 * @event_handler: A user-specified callback that is invoked when an
1008 * asynchronous event not associated with a completion occurs on the CQ.
1009 * @cq_context: Context associated with the CQ returned to the user via
1010 * the associated completion and event handlers.
1011 * @cqe: The minimum size of the CQ.
1012 *
1013 * Users can examine the cq structure to determine the actual CQ size.
1014 */
1015struct ib_cq *ib_create_cq(struct ib_device *device,
1016 ib_comp_handler comp_handler,
1017 void (*event_handler)(struct ib_event *, void *),
1018 void *cq_context, int cqe);
1019
1020/**
1021 * ib_resize_cq - Modifies the capacity of the CQ.
1022 * @cq: The CQ to resize.
1023 * @cqe: The minimum size of the CQ.
1024 *
1025 * Users can examine the cq structure to determine the actual CQ size.
1026 */
1027int ib_resize_cq(struct ib_cq *cq, int cqe);
1028
1029/**
1030 * ib_destroy_cq - Destroys the specified CQ.
1031 * @cq: The CQ to destroy.
1032 */
1033int ib_destroy_cq(struct ib_cq *cq);
1034
1035/**
1036 * ib_poll_cq - poll a CQ for completion(s)
1037 * @cq:the CQ being polled
1038 * @num_entries:maximum number of completions to return
1039 * @wc:array of at least @num_entries &struct ib_wc where completions
1040 * will be returned
1041 *
1042 * Poll a CQ for (possibly multiple) completions. If the return value
1043 * is < 0, an error occurred. If the return value is >= 0, it is the
1044 * number of completions returned. If the return value is
1045 * non-negative and < num_entries, then the CQ was emptied.
1046 */
1047static inline int ib_poll_cq(struct ib_cq *cq, int num_entries,
1048 struct ib_wc *wc)
1049{
1050 return cq->device->poll_cq(cq, num_entries, wc);
1051}
1052
1053/**
1054 * ib_peek_cq - Returns the number of unreaped completions currently
1055 * on the specified CQ.
1056 * @cq: The CQ to peek.
1057 * @wc_cnt: A minimum number of unreaped completions to check for.
1058 *
1059 * If the number of unreaped completions is greater than or equal to wc_cnt,
1060 * this function returns wc_cnt, otherwise, it returns the actual number of
1061 * unreaped completions.
1062 */
1063int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
1064
1065/**
1066 * ib_req_notify_cq - Request completion notification on a CQ.
1067 * @cq: The CQ to generate an event for.
1068 * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
1069 * occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
1070 * notification will occur on the next completion.
1071 */
1072static inline int ib_req_notify_cq(struct ib_cq *cq,
1073 enum ib_cq_notify cq_notify)
1074{
1075 return cq->device->req_notify_cq(cq, cq_notify);
1076}
1077
1078/**
1079 * ib_req_ncomp_notif - Request completion notification when there are
1080 * at least the specified number of unreaped completions on the CQ.
1081 * @cq: The CQ to generate an event for.
1082 * @wc_cnt: The number of unreaped completions that should be on the
1083 * CQ before an event is generated.
1084 */
1085static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
1086{
1087 return cq->device->req_ncomp_notif ?
1088 cq->device->req_ncomp_notif(cq, wc_cnt) :
1089 -ENOSYS;
1090}
1091
1092/**
1093 * ib_get_dma_mr - Returns a memory region for system memory that is
1094 * usable for DMA.
1095 * @pd: The protection domain associated with the memory region.
1096 * @mr_access_flags: Specifies the memory access rights.
1097 */
1098struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags);
1099
1100/**
1101 * ib_reg_phys_mr - Prepares a virtually addressed memory region for use
1102 * by an HCA.
 * @pd: The protection domain assigned to the registered region.
1104 * @phys_buf_array: Specifies a list of physical buffers to use in the
1105 * memory region.
1106 * @num_phys_buf: Specifies the size of the phys_buf_array.
1107 * @mr_access_flags: Specifies the memory access rights.
1108 * @iova_start: The offset of the region's starting I/O virtual address.
1109 */
1110struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
1111 struct ib_phys_buf *phys_buf_array,
1112 int num_phys_buf,
1113 int mr_access_flags,
1114 u64 *iova_start);
1115
1116/**
1117 * ib_rereg_phys_mr - Modifies the attributes of an existing memory region.
 * Conceptually, this call performs the functions of deregistering a memory
 * region followed by registering a physical memory region.  Where possible,
1120 * resources are reused instead of deallocated and reallocated.
1121 * @mr: The memory region to modify.
1122 * @mr_rereg_mask: A bit-mask used to indicate which of the following
1123 * properties of the memory region are being modified.
1124 * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
1125 * the new protection domain to associated with the memory region,
1126 * otherwise, this parameter is ignored.
1127 * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1128 * field specifies a list of physical buffers to use in the new
1129 * translation, otherwise, this parameter is ignored.
1130 * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
1131 * field specifies the size of the phys_buf_array, otherwise, this
1132 * parameter is ignored.
1133 * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
1134 * field specifies the new memory access rights, otherwise, this
1135 * parameter is ignored.
1136 * @iova_start: The offset of the region's starting I/O virtual address.
1137 */
1138int ib_rereg_phys_mr(struct ib_mr *mr,
1139 int mr_rereg_mask,
1140 struct ib_pd *pd,
1141 struct ib_phys_buf *phys_buf_array,
1142 int num_phys_buf,
1143 int mr_access_flags,
1144 u64 *iova_start);
1145
1146/**
1147 * ib_query_mr - Retrieves information about a specific memory region.
1148 * @mr: The memory region to retrieve information about.
1149 * @mr_attr: The attributes of the specified memory region.
1150 */
1151int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
1152
1153/**
1154 * ib_dereg_mr - Deregisters a memory region and removes it from the
1155 * HCA translation table.
1156 * @mr: The memory region to deregister.
1157 */
1158int ib_dereg_mr(struct ib_mr *mr);
1159
1160/**
1161 * ib_alloc_mw - Allocates a memory window.
1162 * @pd: The protection domain associated with the memory window.
1163 */
1164struct ib_mw *ib_alloc_mw(struct ib_pd *pd);
1165
1166/**
1167 * ib_bind_mw - Posts a work request to the send queue of the specified
1168 * QP, which binds the memory window to the given address range and
1169 * remote access attributes.
1170 * @qp: QP to post the bind work request on.
1171 * @mw: The memory window to bind.
1172 * @mw_bind: Specifies information about the memory window, including
1173 * its address range, remote access rights, and associated memory region.
1174 */
1175static inline int ib_bind_mw(struct ib_qp *qp,
1176 struct ib_mw *mw,
1177 struct ib_mw_bind *mw_bind)
1178{
1179 /* XXX reference counting in corresponding MR? */
1180 return mw->device->bind_mw ?
1181 mw->device->bind_mw(qp, mw, mw_bind) :
1182 -ENOSYS;
1183}
1184
1185/**
1186 * ib_dealloc_mw - Deallocates a memory window.
1187 * @mw: The memory window to deallocate.
1188 */
1189int ib_dealloc_mw(struct ib_mw *mw);
1190
1191/**
 * ib_alloc_fmr - Allocates an unmapped fast memory region.
1193 * @pd: The protection domain associated with the unmapped region.
1194 * @mr_access_flags: Specifies the memory access rights.
1195 * @fmr_attr: Attributes of the unmapped region.
1196 *
1197 * A fast memory region must be mapped before it can be used as part of
1198 * a work request.
1199 */
1200struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
1201 int mr_access_flags,
1202 struct ib_fmr_attr *fmr_attr);
1203
1204/**
1205 * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
1206 * @fmr: The fast memory region to associate with the pages.
1207 * @page_list: An array of physical pages to map to the fast memory region.
1208 * @list_len: The number of pages in page_list.
1209 * @iova: The I/O virtual address to use with the mapped region.
1210 */
1211static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
1212 u64 *page_list, int list_len,
1213 u64 iova)
1214{
1215 return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova);
1216}
1217
1218/**
1219 * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
1220 * @fmr_list: A linked list of fast memory regions to unmap.
1221 */
1222int ib_unmap_fmr(struct list_head *fmr_list);
1223
1224/**
1225 * ib_dealloc_fmr - Deallocates a fast memory region.
1226 * @fmr: The fast memory region to deallocate.
1227 */
1228int ib_dealloc_fmr(struct ib_fmr *fmr);
1229
1230/**
1231 * ib_attach_mcast - Attaches the specified QP to a multicast group.
1232 * @qp: QP to attach to the multicast group. The QP must be type
1233 * IB_QPT_UD.
1234 * @gid: Multicast group GID.
1235 * @lid: Multicast group LID in host byte order.
1236 *
1237 * In order to send and receive multicast packets, subnet
1238 * administration must have created the multicast group and configured
1239 * the fabric appropriately. The port associated with the specified
1240 * QP must also be a member of the multicast group.
1241 */
1242int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1243
1244/**
1245 * ib_detach_mcast - Detaches the specified QP from a multicast group.
1246 * @qp: QP to detach from the multicast group.
1247 * @gid: Multicast group GID.
1248 * @lid: Multicast group LID in host byte order.
1249 */
1250int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
1251
1252#endif /* IB_VERBS_H */