path: root/drivers/infiniband/core/user_mad.c
Diffstat (limited to 'drivers/infiniband/core/user_mad.c')
-rw-r--r--  drivers/infiniband/core/user_mad.c  225
1 file changed, 151 insertions, 74 deletions
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index c908de8db5a9..fb6cd42601f9 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -31,7 +31,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
+ * $Id: user_mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
  */
 
 #include <linux/module.h>
@@ -121,6 +121,7 @@ struct ib_umad_file {
 
 struct ib_umad_packet {
 	struct ib_mad_send_buf *msg;
+	struct ib_mad_recv_wc  *recv_wc;
 	struct list_head   list;
 	int		   length;
 	struct ib_user_mad mad;
@@ -176,31 +177,32 @@ static int queue_packet(struct ib_umad_file *file,
 	return ret;
 }
 
+static int data_offset(u8 mgmt_class)
+{
+	if (mgmt_class == IB_MGMT_CLASS_SUBN_ADM)
+		return IB_MGMT_SA_HDR;
+	else if ((mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START) &&
+		 (mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END))
+		return IB_MGMT_VENDOR_HDR;
+	else
+		return IB_MGMT_RMPP_HDR;
+}
+
 static void send_handler(struct ib_mad_agent *agent,
 			 struct ib_mad_send_wc *send_wc)
 {
 	struct ib_umad_file *file = agent->context;
-	struct ib_umad_packet *timeout;
 	struct ib_umad_packet *packet = send_wc->send_buf->context[0];
 
 	ib_destroy_ah(packet->msg->ah);
 	ib_free_send_mad(packet->msg);
 
 	if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
-		timeout = kzalloc(sizeof *timeout + IB_MGMT_MAD_HDR, GFP_KERNEL);
-		if (!timeout)
-			goto out;
-
-		timeout->length = IB_MGMT_MAD_HDR;
-		timeout->mad.hdr.id = packet->mad.hdr.id;
-		timeout->mad.hdr.status = ETIMEDOUT;
-		memcpy(timeout->mad.data, packet->mad.data,
-		       sizeof (struct ib_mad_hdr));
-
-		if (queue_packet(file, agent, timeout))
-			kfree(timeout);
+		packet->length = IB_MGMT_MAD_HDR;
+		packet->mad.hdr.status = ETIMEDOUT;
+		if (!queue_packet(file, agent, packet))
+			return;
 	}
-out:
 	kfree(packet);
 }
 
@@ -209,22 +211,20 @@ static void recv_handler(struct ib_mad_agent *agent,
 {
 	struct ib_umad_file *file = agent->context;
 	struct ib_umad_packet *packet;
-	int length;
 
 	if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
-		goto out;
+		goto err1;
 
-	length = mad_recv_wc->mad_len;
-	packet = kzalloc(sizeof *packet + length, GFP_KERNEL);
+	packet = kzalloc(sizeof *packet, GFP_KERNEL);
 	if (!packet)
-		goto out;
+		goto err1;
 
-	packet->length = length;
-
-	ib_coalesce_recv_mad(mad_recv_wc, packet->mad.data);
+	packet->length = mad_recv_wc->mad_len;
+	packet->recv_wc = mad_recv_wc;
 
 	packet->mad.hdr.status = 0;
-	packet->mad.hdr.length = length + sizeof (struct ib_user_mad);
+	packet->mad.hdr.length = sizeof (struct ib_user_mad) +
+				 mad_recv_wc->mad_len;
 	packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp);
 	packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid);
 	packet->mad.hdr.sl = mad_recv_wc->wc->sl;
@@ -240,12 +240,79 @@ static void recv_handler(struct ib_mad_agent *agent,
 	}
 
 	if (queue_packet(file, agent, packet))
-		kfree(packet);
+		goto err2;
+	return;
 
-out:
+err2:
+	kfree(packet);
+err1:
 	ib_free_recv_mad(mad_recv_wc);
 }
 
+static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet,
+			     size_t count)
+{
+	struct ib_mad_recv_buf *recv_buf;
+	int left, seg_payload, offset, max_seg_payload;
+
+	/* We need enough room to copy the first (or only) MAD segment. */
+	recv_buf = &packet->recv_wc->recv_buf;
+	if ((packet->length <= sizeof (*recv_buf->mad) &&
+	     count < sizeof (packet->mad) + packet->length) ||
+	    (packet->length > sizeof (*recv_buf->mad) &&
+	     count < sizeof (packet->mad) + sizeof (*recv_buf->mad)))
+		return -EINVAL;
+
+	if (copy_to_user(buf, &packet->mad, sizeof (packet->mad)))
+		return -EFAULT;
+
+	buf += sizeof (packet->mad);
+	seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad));
+	if (copy_to_user(buf, recv_buf->mad, seg_payload))
+		return -EFAULT;
+
+	if (seg_payload < packet->length) {
+		/*
+		 * Multipacket RMPP MAD message. Copy remainder of message.
+		 * Note that last segment may have a shorter payload.
+		 */
+		if (count < sizeof (packet->mad) + packet->length) {
+			/*
+			 * The buffer is too small, return the first RMPP segment,
+			 * which includes the RMPP message length.
+			 */
+			return -ENOSPC;
+		}
+		offset = data_offset(recv_buf->mad->mad_hdr.mgmt_class);
+		max_seg_payload = sizeof (struct ib_mad) - offset;
+
+		for (left = packet->length - seg_payload, buf += seg_payload;
+		     left; left -= seg_payload, buf += seg_payload) {
+			recv_buf = container_of(recv_buf->list.next,
+						struct ib_mad_recv_buf, list);
+			seg_payload = min(left, max_seg_payload);
+			if (copy_to_user(buf, ((void *) recv_buf->mad) + offset,
+					 seg_payload))
+				return -EFAULT;
+		}
+	}
+	return sizeof (packet->mad) + packet->length;
+}
+
+static ssize_t copy_send_mad(char __user *buf, struct ib_umad_packet *packet,
+			     size_t count)
+{
+	ssize_t size = sizeof (packet->mad) + packet->length;
+
+	if (count < size)
+		return -EINVAL;
+
+	if (copy_to_user(buf, &packet->mad, size))
+		return -EFAULT;
+
+	return size;
+}
+
 static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 			    size_t count, loff_t *pos)
 {
@@ -253,7 +320,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 	struct ib_umad_packet *packet;
 	ssize_t ret;
 
-	if (count < sizeof (struct ib_user_mad) + sizeof (struct ib_mad))
+	if (count < sizeof (struct ib_user_mad))
 		return -EINVAL;
 
 	spin_lock_irq(&file->recv_lock);
@@ -276,28 +343,44 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf,
 
 	spin_unlock_irq(&file->recv_lock);
 
-	if (count < packet->length + sizeof (struct ib_user_mad)) {
-		/* Return length needed (and first RMPP segment) if too small */
-		if (copy_to_user(buf, &packet->mad,
-				 sizeof (struct ib_user_mad) + sizeof (struct ib_mad)))
-			ret = -EFAULT;
-		else
-			ret = -ENOSPC;
-	} else if (copy_to_user(buf, &packet->mad,
-				packet->length + sizeof (struct ib_user_mad)))
-		ret = -EFAULT;
+	if (packet->recv_wc)
+		ret = copy_recv_mad(buf, packet, count);
 	else
-		ret = packet->length + sizeof (struct ib_user_mad);
+		ret = copy_send_mad(buf, packet, count);
+
 	if (ret < 0) {
 		/* Requeue packet */
 		spin_lock_irq(&file->recv_lock);
 		list_add(&packet->list, &file->recv_list);
 		spin_unlock_irq(&file->recv_lock);
-	} else
+	} else {
+		if (packet->recv_wc)
+			ib_free_recv_mad(packet->recv_wc);
 		kfree(packet);
+	}
 	return ret;
 }
 
+static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
+{
+	int left, seg;
+
+	/* Copy class specific header */
+	if ((msg->hdr_len > IB_MGMT_RMPP_HDR) &&
+	    copy_from_user(msg->mad + IB_MGMT_RMPP_HDR, buf + IB_MGMT_RMPP_HDR,
+			   msg->hdr_len - IB_MGMT_RMPP_HDR))
+		return -EFAULT;
+
+	/* All headers are in place. Copy data segments. */
+	for (seg = 1, left = msg->data_len, buf += msg->hdr_len; left > 0;
+	     seg++, left -= msg->seg_size, buf += msg->seg_size) {
+		if (copy_from_user(ib_get_rmpp_segment(msg, seg), buf,
+				   min(left, msg->seg_size)))
+			return -EFAULT;
+	}
+	return 0;
+}
+
 static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 			     size_t count, loff_t *pos)
 {
@@ -309,14 +392,12 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	struct ib_rmpp_mad *rmpp_mad;
 	u8 method;
 	__be64 *tid;
-	int ret, length, hdr_len, copy_offset;
-	int rmpp_active, has_rmpp_header;
+	int ret, data_len, hdr_len, copy_offset, rmpp_active;
 
 	if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)
 		return -EINVAL;
 
-	length = count - sizeof (struct ib_user_mad);
-	packet = kmalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
+	packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL);
 	if (!packet)
 		return -ENOMEM;
 
@@ -363,35 +444,25 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	if (rmpp_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_ADM) {
 		hdr_len = IB_MGMT_SA_HDR;
 		copy_offset = IB_MGMT_RMPP_HDR;
-		has_rmpp_header = 1;
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+			      IB_MGMT_RMPP_FLAG_ACTIVE;
 	} else if (rmpp_mad->mad_hdr.mgmt_class >= IB_MGMT_CLASS_VENDOR_RANGE2_START &&
 		   rmpp_mad->mad_hdr.mgmt_class <= IB_MGMT_CLASS_VENDOR_RANGE2_END) {
 		hdr_len = IB_MGMT_VENDOR_HDR;
 		copy_offset = IB_MGMT_RMPP_HDR;
-		has_rmpp_header = 1;
+		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
+			      IB_MGMT_RMPP_FLAG_ACTIVE;
 	} else {
 		hdr_len = IB_MGMT_MAD_HDR;
 		copy_offset = IB_MGMT_MAD_HDR;
-		has_rmpp_header = 0;
-	}
-
-	if (has_rmpp_header)
-		rmpp_active = ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) &
-			      IB_MGMT_RMPP_FLAG_ACTIVE;
-	else
 		rmpp_active = 0;
-
-	/* Validate that the management class can support RMPP */
-	if (rmpp_active && !agent->rmpp_version) {
-		ret = -EINVAL;
-		goto err_ah;
 	}
 
+	data_len = count - sizeof (struct ib_user_mad) - hdr_len;
 	packet->msg = ib_create_send_mad(agent,
 					 be32_to_cpu(packet->mad.hdr.qpn),
-					 0, rmpp_active,
-					 hdr_len, length - hdr_len,
-					 GFP_KERNEL);
+					 0, rmpp_active, hdr_len,
+					 data_len, GFP_KERNEL);
 	if (IS_ERR(packet->msg)) {
 		ret = PTR_ERR(packet->msg);
 		goto err_ah;
@@ -402,14 +473,21 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 	packet->msg->retries = packet->mad.hdr.retries;
 	packet->msg->context[0] = packet;
 
-	/* Copy MAD headers (RMPP header in place) */
+	/* Copy MAD header. Any RMPP header is already in place. */
 	memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR);
-	/* Now, copy rest of message from user into send buffer */
-	if (copy_from_user(packet->msg->mad + copy_offset,
-			   buf + sizeof (struct ib_user_mad) + copy_offset,
-			   length - copy_offset)) {
-		ret = -EFAULT;
-		goto err_msg;
+	buf += sizeof (struct ib_user_mad);
+
+	if (!rmpp_active) {
+		if (copy_from_user(packet->msg->mad + copy_offset,
+				   buf + copy_offset,
+				   hdr_len + data_len - copy_offset)) {
+			ret = -EFAULT;
+			goto err_msg;
+		}
+	} else {
+		ret = copy_rmpp_mad(packet->msg, buf);
+		if (ret)
+			goto err_msg;
 	}
 
 	/*
@@ -433,18 +511,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
 		goto err_msg;
 
 	up_read(&file->port->mutex);
-
 	return count;
 
 err_msg:
 	ib_free_send_mad(packet->msg);
-
 err_ah:
 	ib_destroy_ah(ah);
-
 err_up:
 	up_read(&file->port->mutex);
-
 err:
 	kfree(packet);
 	return ret;
@@ -627,8 +701,11 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
 	already_dead = file->agents_dead;
 	file->agents_dead = 1;
 
-	list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
+	list_for_each_entry_safe(packet, tmp, &file->recv_list, list) {
+		if (packet->recv_wc)
+			ib_free_recv_mad(packet->recv_wc);
 		kfree(packet);
+	}
 
 	list_del(&file->port_list);
 
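
A minimal userspace sketch of how a consumer might read a large RMPP response through the umad character device after this change. This is illustration only, not part of the patch: the open file descriptor, prior agent registration through the umad ioctls, and most error handling are assumed, and the struct ib_user_mad layout with its hdr.length field is taken from the kernel's <rdma/ib_user_mad.h>. If the supplied buffer only has room for the first segment, read() fails with ENOSPC after filling in the header and first segment, the packet is requeued, and hdr.length reports the full size needed, so the caller can retry with a larger buffer:

/* Hypothetical helper, not from the kernel tree. */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <rdma/ib_user_mad.h>	/* assumed visible to the userspace build */

static struct ib_user_mad *read_large_mad(int fd, size_t *out_len)
{
	/* Room for the ib_user_mad header plus one 256-byte MAD segment. */
	size_t size = sizeof (struct ib_user_mad) + 256;
	struct ib_user_mad *umad = malloc(size), *bigger;
	ssize_t n;

	if (!umad)
		return NULL;

	n = read(fd, umad, size);
	if (n < 0 && errno == ENOSPC) {
		/*
		 * Multi-segment RMPP reply: the kernel copied the header and
		 * first segment, requeued the packet, and put the total size
		 * (header + full payload) in umad->hdr.length.
		 */
		size = umad->hdr.length;
		bigger = realloc(umad, size);
		if (!bigger) {
			free(umad);
			return NULL;
		}
		umad = bigger;
		n = read(fd, umad, size);
	}
	if (n < 0) {
		free(umad);
		return NULL;
	}
	*out_len = (size_t) n;
	return umad;
}

On the send side the change is symmetric: userspace writes the ib_user_mad header plus the entire RMPP message in one contiguous buffer, and the kernel splits it into segments via copy_rmpp_mad().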