author     Hal Rosenstock <halr@voltaire.com>      2005-07-27 14:45:36 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2005-07-27 19:26:12 -0400
commit     d2082ee516200095956bd66279be4f62f4a5843d (patch)
tree       d8e08d2af67730a312e000833971ef281669e32c /drivers
parent     497677ab940e637a41351dca6610bc4320abc8f1 (diff)

[PATCH] IB: Introduce RMPP APIs

Introduce RMPP APIs

Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'drivers')

 -rw-r--r--  drivers/infiniband/core/mad.c          4
 -rw-r--r--  drivers/infiniband/core/sa_query.c    20
 -rw-r--r--  drivers/infiniband/include/ib_mad.h  132
 -rw-r--r--  drivers/infiniband/include/ib_sa.h     4

4 files changed, 125 insertions, 35 deletions
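Before the diff itself, a brief illustration of what the new API shape looks like to a MAD client. This sketch is not part of the patch: the function name, the choice of an SA-class MAD, and the header sizes (24-byte MAD header + 12-byte RMPP header + 20-byte SA header, per the structures added to ib_mad.h below) are assumptions for illustration, and the 'mad' field of struct ib_mad_send_buf comes from the pre-existing header rather than this diff. Note that with this patch alone, ib_create_send_mad() still rejects rmpp_active until RMPP itself is implemented.

#include <linux/err.h>
#include <ib_mad.h>	/* lives in drivers/infiniband/include at this point */

/* Hypothetical caller of the new rmpp_active argument -- illustration only. */
static int example_create_rmpp_sa_mad(struct ib_mad_agent *agent,
				      struct ib_ah *ah, u32 remote_qpn,
				      u16 pkey_index, int data_len)
{
	struct ib_mad_send_buf *msg;
	struct ib_sa_mad *sa_mad;

	/* hdr_len = common MAD (24) + RMPP (12) + SA class header (20) */
	msg = ib_create_send_mad(agent, remote_qpn, pkey_index, ah,
				 1 /* rmpp_active */, 24 + 12 + 20, data_len,
				 GFP_KERNEL);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* -EINVAL until RMPP is implemented */

	/* 'mad' is a pre-existing ib_mad_send_buf field, not shown in this diff */
	sa_mad = msg->mad;
	sa_mad->rmpp_hdr.rmpp_version = IB_MGMT_RMPP_VERSION;
	ib_set_rmpp_flags(&sa_mad->rmpp_hdr, IB_MGMT_RMPP_FLAG_ACTIVE);

	/*
	 * Per the new ib_post_send_mad() comment, paylen_newwin carries the
	 * class header plus class data length, in network byte order.
	 */
	sa_mad->rmpp_hdr.paylen_newwin = cpu_to_be32(20 + data_len);

	/* filling the remaining MAD/SA headers and posting are omitted here */
	return 0;
}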
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 8216af0ba78..26e2b59ce5a 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -777,7 +777,7 @@ static int get_buf_length(int hdr_len, int data_len)
 
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    u32 remote_qpn, u16 pkey_index,
-					    struct ib_ah *ah,
+					    struct ib_ah *ah, int rmpp_active,
 					    int hdr_len, int data_len,
 					    unsigned int __nocast gfp_mask)
 {
@@ -786,6 +786,8 @@ struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 	int buf_size;
 	void *buf;
 
+	if (rmpp_active)
+		return ERR_PTR(-EINVAL);	/* until RMPP implemented */
 	mad_agent_priv = container_of(mad_agent,
 				      struct ib_mad_agent_private, agent);
 	buf_size = get_buf_length(hdr_len, data_len);
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 9ef8fe0163d..4ec80443702 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -50,26 +50,6 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand subnet administration query support");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/*
- * These two structures must be packed because they have 64-bit fields
- * that are only 32-bit aligned.  64-bit architectures will lay them
- * out wrong otherwise.  (And unfortunately they are sent on the wire
- * so we can't change the layout)
- */
-struct ib_sa_hdr {
-	u64			sm_key;
-	u16			attr_offset;
-	u16			reserved;
-	ib_sa_comp_mask		comp_mask;
-} __attribute__ ((packed));
-
-struct ib_sa_mad {
-	struct ib_mad_hdr	mad_hdr;
-	struct ib_rmpp_hdr	rmpp_hdr;
-	struct ib_sa_hdr	sa_hdr;
-	u8			data[200];
-} __attribute__ ((packed));
-
 struct ib_sa_sm_ah {
 	struct ib_ah	*ah;
 	struct kref	ref;
diff --git a/drivers/infiniband/include/ib_mad.h b/drivers/infiniband/include/ib_mad.h
index 817e932c79c..491b6f25b3b 100644
--- a/drivers/infiniband/include/ib_mad.h
+++ b/drivers/infiniband/include/ib_mad.h
@@ -33,7 +33,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: ib_mad.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: ib_mad.h 2775 2005-07-02 13:42:12Z halr $
  */
 
 #if !defined( IB_MAD_H )
@@ -58,6 +58,8 @@
 #define IB_MGMT_CLASS_VENDOR_RANGE2_START	0x30
 #define IB_MGMT_CLASS_VENDOR_RANGE2_END		0x4F
 
+#define IB_OPENIB_OUI				(0x001405)
+
 /* Management methods */
 #define IB_MGMT_METHOD_GET			0x01
 #define IB_MGMT_METHOD_SET			0x02
@@ -72,6 +74,33 @@
 
 #define IB_MGMT_MAX_METHODS			128
 
+/* RMPP information */
+#define IB_MGMT_RMPP_VERSION			1
+
+#define IB_MGMT_RMPP_TYPE_DATA			1
+#define IB_MGMT_RMPP_TYPE_ACK			2
+#define IB_MGMT_RMPP_TYPE_STOP			3
+#define IB_MGMT_RMPP_TYPE_ABORT			4
+
+#define IB_MGMT_RMPP_FLAG_ACTIVE		1
+#define IB_MGMT_RMPP_FLAG_FIRST			(1<<1)
+#define IB_MGMT_RMPP_FLAG_LAST			(1<<2)
+
+#define IB_MGMT_RMPP_NO_RESPTIME		0x1F
+
+#define IB_MGMT_RMPP_STATUS_SUCCESS		0
+#define IB_MGMT_RMPP_STATUS_RESX		1
+#define IB_MGMT_RMPP_STATUS_T2L			118
+#define IB_MGMT_RMPP_STATUS_BAD_LEN		119
+#define IB_MGMT_RMPP_STATUS_BAD_SEG		120
+#define IB_MGMT_RMPP_STATUS_BADT		121
+#define IB_MGMT_RMPP_STATUS_W2S			122
+#define IB_MGMT_RMPP_STATUS_S2B			123
+#define IB_MGMT_RMPP_STATUS_BAD_STATUS		124
+#define IB_MGMT_RMPP_STATUS_UNV			125
+#define IB_MGMT_RMPP_STATUS_TMR			126
+#define IB_MGMT_RMPP_STATUS_UNSPEC		127
+
 #define IB_QP0		0
 #define IB_QP1		__constant_htonl(1)
 #define IB_QP1_QKEY	0x80010000
@@ -88,7 +117,7 @@ struct ib_mad_hdr {
 	u16	attr_id;
 	u16	resv;
 	u32	attr_mod;
-} __attribute__ ((packed));
+};
 
 struct ib_rmpp_hdr {
 	u8	rmpp_version;
@@ -97,17 +126,41 @@ struct ib_rmpp_hdr {
 	u8	rmpp_status;
 	u32	seg_num;
 	u32	paylen_newwin;
+};
+
+typedef u64 __bitwise ib_sa_comp_mask;
+
+#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
+
+/*
+ * ib_sa_hdr and ib_sa_mad structures must be packed because they have
+ * 64-bit fields that are only 32-bit aligned. 64-bit architectures will
+ * lay them out wrong otherwise.  (And unfortunately they are sent on
+ * the wire so we can't change the layout)
+ */
+struct ib_sa_hdr {
+	u64			sm_key;
+	u16			attr_offset;
+	u16			reserved;
+	ib_sa_comp_mask		comp_mask;
 } __attribute__ ((packed));
 
 struct ib_mad {
 	struct ib_mad_hdr	mad_hdr;
 	u8			data[232];
-} __attribute__ ((packed));
+};
 
 struct ib_rmpp_mad {
 	struct ib_mad_hdr	mad_hdr;
 	struct ib_rmpp_hdr	rmpp_hdr;
 	u8			data[220];
+};
+
+struct ib_sa_mad {
+	struct ib_mad_hdr	mad_hdr;
+	struct ib_rmpp_hdr	rmpp_hdr;
+	struct ib_sa_hdr	sa_hdr;
+	u8			data[200];
 } __attribute__ ((packed));
 
 struct ib_vendor_mad {
@@ -116,7 +169,7 @@ struct ib_vendor_mad {
 	u8			reserved;
 	u8			oui[3];
 	u8			data[216];
-} __attribute__ ((packed));
+};
 
 /**
  * ib_mad_send_buf - MAD data buffer and work request for sends.
@@ -142,6 +195,45 @@ struct ib_mad_send_buf {
 	struct ib_sge		sge;
 };
 
+/**
+ * ib_get_rmpp_resptime - Returns the RMPP response time.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr)
+{
+	return rmpp_hdr->rmpp_rtime_flags >> 3;
+}
+
+/**
+ * ib_get_rmpp_flags - Returns the RMPP flags.
+ * @rmpp_hdr: An RMPP header.
+ */
+static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr)
+{
+	return rmpp_hdr->rmpp_rtime_flags & 0x7;
+}
+
+/**
+ * ib_set_rmpp_resptime - Sets the response time in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @rtime: The response time to set.
+ */
+static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime)
+{
+	rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3);
+}
+
+/**
+ * ib_set_rmpp_flags - Sets the flags in an RMPP header.
+ * @rmpp_hdr: An RMPP header.
+ * @flags: The flags to set.
+ */
+static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags)
+{
+	rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) |
+				     (flags & 0x7);
+}
+
 struct ib_mad_agent;
 struct ib_mad_send_wc;
 struct ib_mad_recv_wc;
@@ -186,6 +278,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
  * ib_mad_agent - Used to track MAD registration with the access layer.
  * @device: Reference to device registration is on.
  * @qp: Reference to QP used for sending and receiving MADs.
+ * @mr: Memory region for system memory usable for DMA.
  * @recv_handler: Callback handler for a received MAD.
  * @send_handler: Callback handler for a sent MAD.
  * @snoop_handler: Callback handler for snooped sent MADs.
@@ -194,6 +287,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
  *   Unsolicited MADs sent by this client will have the upper 32-bits
  *   of their TID set to this value.
  * @port_num: Port number on which QP is registered
+ * @rmpp_version: If set, indicates the RMPP version used by this agent.
  */
 struct ib_mad_agent {
 	struct ib_device	*device;
@@ -205,6 +299,7 @@ struct ib_mad_agent {
 	void			*context;
 	u32			hi_tid;
 	u8			port_num;
+	u8			rmpp_version;
 };
 
 /**
@@ -238,6 +333,7 @@ struct ib_mad_recv_buf {
  * ib_mad_recv_wc - received MAD information.
  * @wc: Completion information for the received data.
  * @recv_buf: Specifies the location of the received data buffer(s).
+ * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
  * @mad_len: The length of the received MAD, without duplicated headers.
  *
  * For received response, the wr_id field of the wc is set to the wr_id
@@ -246,6 +342,7 @@ struct ib_mad_recv_buf {
 struct ib_mad_recv_wc {
 	struct ib_wc		*wc;
 	struct ib_mad_recv_buf	recv_buf;
+	struct list_head	rmpp_list;
 	int			mad_len;
 };
 
@@ -341,6 +438,16 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
  * @bad_send_wr: Specifies the MAD on which an error was encountered.
  *
  * Sent MADs are not guaranteed to complete in the order that they were posted.
+ *
+ * If the MAD requires RMPP, the data buffer should contain a single copy
+ * of the common MAD, RMPP, and class specific headers, followed by the class
+ * defined data.  If the class defined data would not divide evenly into
+ * RMPP segments, then space must be allocated at the end of the referenced
+ * buffer for any required padding.  To indicate the amount of class defined
+ * data being transferred, the paylen_newwin field in the RMPP header should
+ * be set to the size of the class specific header plus the amount of class
+ * defined data being transferred.  The paylen_newwin field should be
+ * specified in network-byte order.
  */
 int ib_post_send_mad(struct ib_mad_agent *mad_agent,
 		     struct ib_send_wr *send_wr,
@@ -353,14 +460,13 @@ int ib_post_send_mad(struct ib_mad_agent *mad_agent,
  * referenced buffer should be at least the size of the mad_len specified
  * by @mad_recv_wc.
  *
- * This call copies a chain of received RMPP MADs into a single data buffer,
+ * This call copies a chain of received MAD segments into a single data buffer,
  * removing duplicated headers.
  */
 void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf);
 
 /**
- * ib_free_recv_mad - Returns data buffers used to receive a MAD to the
- *   access layer.
+ * ib_free_recv_mad - Returns data buffers used to receive a MAD.
  * @mad_recv_wc: Work completion information for a received MAD.
  *
  * Clients receiving MADs through their ib_mad_recv_handler must call this
@@ -437,10 +543,11 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
  * @pkey_index: Specifies which PKey the MAD will be sent using.  This field
  *	is valid only if the remote_qpn is QP 1.
  * @ah: References the address handle used to transfer to the remote node.
+ * @rmpp_active: Indicates if the send will enable RMPP.
  * @hdr_len: Indicates the size of the data header of the MAD.  This length
  *	should include the common MAD header, RMPP header, plus any class
  *	specific header.
- * @data_len: Indicates the size of any user-transfered data.  The call will
+ * @data_len: Indicates the size of any user-transferred data.  The call will
  *	automatically adjust the allocated buffer size to account for any
  *	additional padding that may be necessary.
  * @gfp_mask: GFP mask used for the memory allocation.
@@ -448,11 +555,16 @@ int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
  * This is a helper routine that may be used to allocate a MAD.  Users are
  * not required to allocate outbound MADs using this call.  The returned
  * MAD send buffer will reference a data buffer usable for sending a MAD, along
- * with an intialized work request structure.
+ * with an initialized work request structure.  Users may modify the returned
+ * MAD data buffer or work request before posting the send.
+ *
+ * The returned data buffer will be cleared.  Users are responsible for
+ * initializing the common MAD and any class specific headers.  If @rmpp_active
+ * is set, the RMPP header will be initialized for sending.
  */
 struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent,
 					    u32 remote_qpn, u16 pkey_index,
-					    struct ib_ah *ah,
+					    struct ib_ah *ah, int rmpp_active,
 					    int hdr_len, int data_len,
 					    unsigned int __nocast gfp_mask);
 
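As a reading aid for the accessors added above (again, not part of the patch): the helpers imply that the low three bits of rmpp_rtime_flags hold the RMPP flags and the upper five bits hold the response time. A minimal sketch, with a made-up helper name:

#include <ib_mad.h>	/* lives in drivers/infiniband/include at this point */

/* Hypothetical helper, for illustration only. */
static void example_init_rmpp_data_hdr(struct ib_rmpp_hdr *hdr)
{
	hdr->rmpp_version = IB_MGMT_RMPP_VERSION;

	/* a single-segment transfer is both the first and the last segment */
	ib_set_rmpp_flags(hdr, IB_MGMT_RMPP_FLAG_ACTIVE |
			       IB_MGMT_RMPP_FLAG_FIRST |
			       IB_MGMT_RMPP_FLAG_LAST);
	ib_set_rmpp_resptime(hdr, IB_MGMT_RMPP_NO_RESPTIME);

	/*
	 * rmpp_rtime_flags is now 0xFF: ib_get_rmpp_flags() returns 0x7 and
	 * ib_get_rmpp_resptime() returns 0x1F (IB_MGMT_RMPP_NO_RESPTIME).
	 */
}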
diff --git a/drivers/infiniband/include/ib_sa.h b/drivers/infiniband/include/ib_sa.h
index 00222285eb9..49a95ca2b8f 100644
--- a/drivers/infiniband/include/ib_sa.h
+++ b/drivers/infiniband/include/ib_sa.h
@@ -87,10 +87,6 @@ static inline int ib_sa_rate_enum_to_int(enum ib_sa_rate rate)
 	}
 }
 
-typedef u64 __bitwise ib_sa_comp_mask;
-
-#define IB_SA_COMP_MASK(n) ((__force ib_sa_comp_mask) cpu_to_be64(1ull << n))
-
 /*
  * Structures for SA records are named "struct ib_sa_xxx_rec."  No
  * attempt is made to pack structures to match the physical layout of