author	Hal Rosenstock <halr@voltaire.com>	2005-07-27 14:45:42 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-07-27 19:26:14 -0400
commit	cb183a06b381652b7637fedfa7ef85ec0baf2a1f
tree	8b559d3fd574af62dfbe39751980d5cb71e32598
parent	3f75daddb4fc6b695faa4e12e76894389e913dcb
[PATCH] IB: Implementation for RMPP support in user MAD
Implementation for RMPP support in user MAD
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	drivers/infiniband/core/user_mad.c	300
1 file changed, 194 insertions(+), 106 deletions(-)
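The visible userspace entry point for this change is the IB_USER_MAD_REGISTER_AGENT ioctl, which (per the hunk near the end of this diff) now passes the user-supplied rmpp_version to ib_register_mad_agent() instead of a hard-coded 0. The following is a minimal, hypothetical userspace sketch, not part of this patch: it assumes the companion ib_user_mad.h ABI update that adds rmpp_version to struct ib_user_mad_reg_req, and the device path, header install location, and helper name are illustrative only.

/*
 * Hypothetical sketch: register a umad agent for the SA class with
 * RMPP enabled.  Paths and constants are illustrative.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include "ib_user_mad.h"	/* kernel ABI header; install path varies */

static int register_sa_agent(const char *umad_dev)
{
	struct ib_user_mad_reg_req req;
	int fd = open(umad_dev, O_RDWR);	/* e.g. /dev/infiniband/umad0 */

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof req);
	req.qpn                = 1;	/* GSI */
	req.mgmt_class         = 0x03;	/* IB_MGMT_CLASS_SUBN_ADM (SA) */
	req.mgmt_class_version = 2;
	req.rmpp_version       = 1;	/* new in this patch; 0 keeps old behavior */

	if (ioctl(fd, IB_USER_MAD_REGISTER_AGENT, &req) < 0)
		return -1;

	return fd;	/* on success the kernel fills in req.id */
}

A registration with rmpp_version == 0 behaves exactly as before this patch, so existing userspace keeps working unchanged.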
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 088bb1f0f514..2e38792df533 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1,5 +1,7 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: user_mad.c 1389 2004-12-27 22:56:47Z roland $
+ * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
  */
 
 #include <linux/module.h>
@@ -94,10 +96,12 @@ struct ib_umad_file {
 };
 
 struct ib_umad_packet {
-	struct ib_user_mad mad;
 	struct ib_ah *ah;
+	struct ib_mad_send_buf *msg;
 	struct list_head list;
+	int length;
 	DECLARE_PCI_UNMAP_ADDR(mapping)
+	struct ib_user_mad mad;
 };
 
 static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
@@ -114,10 +118,10 @@ static int queue_packet(struct ib_umad_file *file,
 	int ret = 1;
 
 	down_read(&file->agent_mutex);
-	for (packet->mad.id = 0;
-	     packet->mad.id < IB_UMAD_MAX_AGENTS;
-	     packet->mad.id++)
-		if (agent == file->agent[packet->mad.id]) {
+	for (packet->mad.hdr.id = 0;
+	     packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
+	     packet->mad.hdr.id++)
+		if (agent == file->agent[packet->mad.hdr.id]) {
 			spin_lock_irq(&file->recv_lock);
 			list_add_tail(&packet->list, &file->recv_list);
 			spin_unlock_irq(&file->recv_lock);
@@ -135,22 +139,30 @@ static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
-	struct ib_umad_packet *packet =
+	struct ib_umad_packet *timeout, *packet =
 		(void *) (unsigned long) send_wc->wr_id;
 
-	dma_unmap_single(agent->device->dma_device,
-			 pci_unmap_addr(packet, mapping),
-			 sizeof packet->mad.data,
-			 DMA_TO_DEVICE);
-	ib_destroy_ah(packet->ah);
+	ib_destroy_ah(packet->msg->send_wr.wr.ud.ah);
+	ib_free_send_mad(packet->msg);
 
 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
-		packet->mad.status = ETIMEDOUT;
+		timeout = kmalloc(sizeof *timeout + sizeof (struct ib_mad_hdr),
+				  GFP_KERNEL);
+		if (!timeout)
+			goto out;
 
-		if (!queue_packet(file, agent, packet))
-			return;
-	}
+		memset(timeout, 0, sizeof *timeout + sizeof (struct ib_mad_hdr));
 
+		timeout->length = sizeof (struct ib_mad_hdr);
+		timeout->mad.hdr.id = packet->mad.hdr.id;
+		timeout->mad.hdr.status = ETIMEDOUT;
+		memcpy(timeout->mad.data, packet->mad.data,
+		       sizeof (struct ib_mad_hdr));
+
+		if (!queue_packet(file, agent, timeout))
+			return;
+	}
+out:
 	kfree(packet);
 }
 
@@ -159,30 +171,35 @@ static void recv_handler(struct ib_mad_agent *agent,
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet;
+	int length;
 
 	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
 		goto out;
 
-	packet = kmalloc(sizeof *packet, GFP_KERNEL);
+	length = mad_recv_wc->mad_len;
+	packet = kmalloc(sizeof *packet + length, GFP_KERNEL);
 	if (!packet)
 		goto out;
 
-	memset(packet, 0, sizeof *packet);
+	memset(packet, 0, sizeof *packet + length);
+	packet->length = length;
+
+	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);
 
-	memcpy(packet->mad.data, mad_recv_wc->recv_buf.mad, sizeof packet->mad.data);
-	packet->mad.status = 0;
-	packet->mad.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
-	packet->mad.lid = cpu_to_be16(mad_recv_wc->wc->slid);
-	packet->mad.sl = mad_recv_wc->wc->sl;
-	packet->mad.path_bits = mad_recv_wc->wc->dlid_path_bits;
-	packet->mad.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
-	if (packet->mad.grh_present) {
+	packet->mad.hdr.status = 0;
+	packet->mad.hdr.length = length + sizeof (struct ib_user_mad);
+	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
+	packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
+	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
+	packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
+	packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
+	if (packet->mad.hdr.grh_present) {
 		/* XXX parse GRH */
-		packet->mad.gid_index = 0;
-		packet->mad.hop_limit = 0;
-		packet->mad.traffic_class = 0;
-		memset(packet->mad.gid, 0, 16);
-		packet->mad.flow_label = 0;
+		packet->mad.hdr.gid_index = 0;
+		packet->mad.hdr.hop_limit = 0;
+		packet->mad.hdr.traffic_class = 0;
+		memset(packet->mad.hdr.gid, 0, 16);
+		packet->mad.hdr.flow_label = 0;
 	}
 
 	if (queue_packet(file, agent, packet))
@@ -199,7 +216,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	struct ib_umad_packet *packet;
 	ssize_t ret;
 
-	if (count < sizeof (struct ib_user_mad))
+	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
 		return -EINVAL;
 
 	spin_lock_irq(&file->recv_lock);
@@ -222,12 +239,25 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 
 	spin_unlock_irq(&file->recv_lock);
 
-	if (copy_to_user(buf, &packet->mad, sizeof packet->mad))
+	if (count < packet->length + sizeof (struct ib_user_mad)) {
+		/* Return length needed (and first RMPP segment) if too small */
+		if (copy_to_user(buf, &packet->mad,
+				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
+			ret = -EFAULT;
+		else
+			ret = -ENOSPC;
+	} else if (copy_to_user(buf, &packet->mad,
+				packet->length + sizeof (struct ib_user_mad)))
 		ret = -EFAULT;
 	else
-		ret = sizeof packet->mad;
-
-	kfree(packet);
+		ret = packet->length + sizeof (struct ib_user_mad);
+	if (ret < 0) {
+		/* Requeue packet */
+		spin_lock_irq(&file->recv_lock);
+		list_add(&packet->list, &file->recv_list);
+		spin_unlock_irq(&file->recv_lock);
+	} else
+		kfree(packet);
 	return ret;
 }
 
@@ -238,69 +268,57 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_umad_packet *packet;
 	struct ib_mad_agent *agent;
 	struct ib_ah_attr ah_attr;
-	struct ib_sge gather_list;
-	struct ib_send_wr *bad_wr, wr = {
-		.opcode = IB_WR_SEND,
-		.sg_list = &gather_list,
-		.num_sge = 1,
-		.send_flags = IB_SEND_SIGNALED,
-	};
+	struct ib_send_wr *bad_wr;
+	struct ib_rmpp_mad *rmpp_mad;
 	u8 method;
 	u64 *tid;
-	int ret;
+	int ret, length, hdr_len, data_len, rmpp_hdr_size;
+	int rmpp_active = 0;
 
 	if (count < sizeof (struct ib_user_mad))
 		return -EINVAL;
 
-	packet = kmalloc(sizeof *packet, GFP_KERNEL);
+	length = count - sizeof (struct ib_user_mad);
+	packet = kmalloc(sizeof *packet + sizeof(struct ib_mad_hdr) +
+			 sizeof(struct ib_rmpp_hdr), GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;
 
-	if (copy_from_user(&packet->mad, buf, sizeof packet->mad)) {
-		kfree(packet);
-		return -EFAULT;
+	if (copy_from_user(&packet->mad, buf,
+			   sizeof (struct ib_user_mad) +
+			   sizeof(struct ib_mad_hdr) +
+			   sizeof(struct ib_rmpp_hdr))) {
+		ret = -EFAULT;
+		goto err;
 	}
 
-	if (packet->mad.id < 0 || packet->mad.id >= IB_UMAD_MAX_AGENTS) {
+	if (packet->mad.hdr.id < 0 ||
+	    packet->mad.hdr.id >= IB_UMAD_MAX_AGENTS) {
 		ret = -EINVAL;
 		goto err;
 	}
 
+	packet->length = length;
+
 	down_read(&file->agent_mutex);
 
-	agent = file->agent[packet->mad.id];
+	agent = file->agent[packet->mad.hdr.id];
 	if (!agent) {
 		ret = -EINVAL;
 		goto err_up;
 	}
 
-	/*
-	 * If userspace is generating a request that will generate a
-	 * response, we need to make sure the high-order part of the
-	 * transaction ID matches the agent being used to send the
-	 * MAD.
-	 */
-	method = ((struct ib_mad_hdr *) packet->mad.data)->method;
-
-	if (!(method & IB_MGMT_METHOD_RESP) &&
-	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
-	    method != IB_MGMT_METHOD_SEND) {
-		tid = &((struct ib_mad_hdr *) packet->mad.data)->tid;
-		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
-				   (be64_to_cpup(tid) & 0xffffffff));
-	}
-
 	memset(&ah_attr, 0, sizeof ah_attr);
-	ah_attr.dlid = be16_to_cpu(packet->mad.lid);
-	ah_attr.sl = packet->mad.sl;
-	ah_attr.src_path_bits = packet->mad.path_bits;
+	ah_attr.dlid = be16_to_cpu(packet->mad.hdr.lid);
+	ah_attr.sl = packet->mad.hdr.sl;
+	ah_attr.src_path_bits = packet->mad.hdr.path_bits;
 	ah_attr.port_num = file->port->port_num;
-	if (packet->mad.grh_present) {
+	if (packet->mad.hdr.grh_present) {
 		ah_attr.ah_flags = IB_AH_GRH;
-		memcpy(ah_attr.grh.dgid.raw, packet->mad.gid, 16);
-		ah_attr.grh.flow_label = packet->mad.flow_label;
-		ah_attr.grh.hop_limit = packet->mad.hop_limit;
-		ah_attr.grh.traffic_class = packet->mad.traffic_class;
+		memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
+		ah_attr.grh.flow_label = packet->mad.hdr.flow_label;
+		ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
+		ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
 	}
 
 	packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
@@ -309,35 +327,104 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err_up;
 	}
 
-	gather_list.addr = dma_map_single(agent->device->dma_device,
-					  packet->mad.data,
-					  sizeof packet->mad.data,
-					  DMA_TO_DEVICE);
-	gather_list.length = sizeof packet->mad.data;
-	gather_list.lkey = file->mr[packet->mad.id]->lkey;
-	pci_unmap_addr_set(packet, mapping, gather_list.addr);
+	rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
+	if (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE) {
+		/* RMPP active */
+		if (!agent->rmpp_version) {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		/* Validate that management class can support RMPP */
+		if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
+			hdr_len = offsetof(struct ib_sa_mad, data);
+			data_len = length;
+		} else if ((rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+			   (rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END)) {
+			hdr_len = offsetof(struct ib_vendor_mad, data);
+			data_len = length - hdr_len;
+		} else {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		rmpp_active = 1;
+	} else {
+		if (length > sizeof(struct ib_mad)) {
+			ret = -EINVAL;
+			goto err_ah;
+		}
+		hdr_len = offsetof(struct ib_mad, data);
+		data_len = length - hdr_len;
+	}
+
+	packet->msg = ib_create_send_mad(agent,
+					 be32_to_cpu(packet->mad.hdr.qpn),
+					 0, packet->ah, rmpp_active,
+					 hdr_len, data_len,
+					 GFP_KERNEL);
+	if (IS_ERR(packet->msg)) {
+		ret = PTR_ERR(packet->msg);
+		goto err_ah;
+	}
 
-	wr.wr.ud.mad_hdr = (struct ib_mad_hdr *) packet->mad.data;
-	wr.wr.ud.ah = packet->ah;
-	wr.wr.ud.remote_qpn = be32_to_cpu(packet->mad.qpn);
-	wr.wr.ud.remote_qkey = be32_to_cpu(packet->mad.qkey);
-	wr.wr.ud.timeout_ms = packet->mad.timeout_ms;
-	wr.wr.ud.retries = 0;
+	packet->msg->send_wr.wr.ud.timeout_ms = packet->mad.hdr.timeout_ms;
+	packet->msg->send_wr.wr.ud.retries = packet->mad.hdr.retries;
 
-	wr.wr_id = (unsigned long) packet;
+	/* Override send WR WRID initialized in ib_create_send_mad */
+	packet->msg->send_wr.wr_id = (unsigned long) packet;
 
-	ret = ib_post_send_mad(agent, &wr, &bad_wr);
-	if (ret) {
-		dma_unmap_single(agent->device->dma_device,
-				 pci_unmap_addr(packet, mapping),
-				 sizeof packet->mad.data,
-				 DMA_TO_DEVICE);
-		goto err_up;
+	if (!rmpp_active) {
+		/* Copy message from user into send buffer */
+		if (copy_from_user(packet->msg->mad,
+				   buf + sizeof(struct ib_user_mad), length)) {
+			ret = -EFAULT;
+			goto err_msg;
+		}
+	} else {
+		rmpp_hdr_size = sizeof(struct ib_mad_hdr) +
+				sizeof(struct ib_rmpp_hdr);
+
+		/* Only copy MAD headers (RMPP header in place) */
+		memcpy(packet->msg->mad, packet->mad.data,
+		       sizeof(struct ib_mad_hdr));
+
+		/* Now, copy rest of message from user into send buffer */
+		if (copy_from_user(((struct ib_rmpp_mad *) packet->msg->mad)->data,
+				   buf + sizeof (struct ib_user_mad) + rmpp_hdr_size,
+				   length - rmpp_hdr_size)) {
+			ret = -EFAULT;
+			goto err_msg;
+		}
+	}
+
+	/*
+	 * If userspace is generating a request that will generate a
+	 * response, we need to make sure the high-order part of the
+	 * transaction ID matches the agent being used to send the
+	 * MAD.
+	 */
+	method = packet->msg->mad->mad_hdr.method;
+
+	if (!(method & IB_MGMT_METHOD_RESP) &&
+	    method != IB_MGMT_METHOD_TRAP_REPRESS &&
+	    method != IB_MGMT_METHOD_SEND) {
+		tid = &packet->msg->mad->mad_hdr.tid;
+		*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
+				   (be64_to_cpup(tid) & 0xffffffff));
 	}
 
+	ret = ib_post_send_mad(agent, &packet->msg->send_wr, &bad_wr);
+	if (ret)
+		goto err_msg;
+
 	up_read(&file->agent_mutex);
 
-	return sizeof packet->mad;
+	return sizeof (struct ib_user_mad_hdr) + packet->length;
+
+err_msg:
+	ib_free_send_mad(packet->msg);
+
+err_ah:
+	ib_destroy_ah(packet->ah);
 
 err_up:
 	up_read(&file->agent_mutex);
@@ -400,7 +487,8 @@ found:
 	agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
 				      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
 				      ureq.mgmt_class ? &req : NULL,
-				      0, send_handler, recv_handler, file);
+				      ureq.rmpp_version,
+				      send_handler, recv_handler, file);
 	if (IS_ERR(agent)) {
 		ret = PTR_ERR(agent);
 		goto out;
@@ -461,8 +549,8 @@ out:
 	return ret;
 }
 
-static long ib_umad_ioctl(struct file *filp,
-			  unsigned int cmd, unsigned long arg)
+static long ib_umad_ioctl(struct file *filp, unsigned int cmd,
+			  unsigned long arg)
 {
 	switch (cmd) {
 	case IB_USER_MAD_REGISTER_AGENT:
@@ -518,14 +606,14 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 }
 
 static struct file_operations umad_fops = {
-	.owner 	 = THIS_MODULE,
-	.read 	 = ib_umad_read,
-	.write 	 = ib_umad_write,
-	.poll 	 = ib_umad_poll,
+	.owner 	        = THIS_MODULE,
+	.read 	        = ib_umad_read,
+	.write 	        = ib_umad_write,
+	.poll 	        = ib_umad_poll,
 	.unlocked_ioctl = ib_umad_ioctl,
-	.compat_ioctl 	 = ib_umad_ioctl,
-	.open 	 = ib_umad_open,
-	.release = ib_umad_close
+	.compat_ioctl   = ib_umad_ioctl,
+	.open 	        = ib_umad_open,
+	.release        = ib_umad_close
 };
 
 static int ib_umad_sm_open(struct inode *inode, struct file *filp)
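The read-side hunks above define a small retry protocol for userspace: ib_umad_read() now returns the full coalesced MAD, and when the caller's buffer is too small it copies the first 256-byte segment, requeues the packet, and fails with -ENOSPC, with the required total size advertised in the header's length field. The sketch below is hypothetical, not part of the patch; it assumes the companion ib_user_mad.h change in which struct ib_user_mad gains an hdr member whose length field is the one set in recv_handler() above, and the helper name is illustrative.

/*
 * Hypothetical sketch: read one MAD, growing the buffer on ENOSPC.
 * hdr.length holds the full size (payload plus struct ib_user_mad),
 * so a single retry suffices.
 */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include "ib_user_mad.h"	/* kernel ABI header; install path varies */

static struct ib_user_mad *read_one_mad(int fd)
{
	/* Minimum the kernel now accepts: header plus one 256-byte MAD */
	size_t len = sizeof (struct ib_user_mad) + 256;
	struct ib_user_mad *mad = malloc(len), *bigger;

	while (mad) {
		if (read(fd, mad, len) >= 0)
			return mad;		/* complete MAD */
		if (errno != ENOSPC)
			break;			/* real error */

		/* Packet was requeued; retry with the advertised size */
		len = mad->hdr.length;
		bigger = realloc(mad, len);
		if (!bigger)
			break;
		mad = bigger;
	}
	free(mad);
	return NULL;
}

Because the too-small read still copies the first segment (including the header), the caller learns the needed length without losing the packet, which is what makes this grow-and-retry loop safe.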