diff options
author | Roland Dreier <rolandd@cisco.com> | 2007-10-09 22:59:15 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2007-10-09 22:59:15 -0400 |
commit | 2be8e3ee8efd6f99ce454115c29d09750915021a (patch) | |
tree | 00e21e061a18c7bd1339ebbb637de9701863312d /drivers/infiniband | |
parent | c01759cee91379cc3cb551bfd7c76f1b51f91ca2 (diff) |
IB/umad: Add P_Key index support
Add support for setting the P_Key index of sent MADs and getting the
P_Key index of received MADs. This requires a change to the layout of
the ABI structure struct ib_user_mad_hdr, so to avoid breaking
compatibility, we default to the old (unchanged) ABI and add a new
ioctl IB_USER_MAD_ENABLE_PKEY that allows applications that are aware
of the new ABI to opt into using it.
We plan on switching to the new ABI by default in a year or so, and
this patch adds a warning that is printed when an application uses the
old ABI, to push people towards converting to the new ABI.
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Reviewed-by: Sean Hefty <sean.hefty@intel.com>
Reviewed-by: Hal Rosenstock <hal@xsigo.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/core/user_mad.c | 102 |
1 file changed, 73 insertions(+), 29 deletions(-)
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index d97ded25c4ff..aee29139368c 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -118,6 +118,8 @@ struct ib_umad_file { | |||
118 | wait_queue_head_t recv_wait; | 118 | wait_queue_head_t recv_wait; |
119 | struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; | 119 | struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS]; |
120 | int agents_dead; | 120 | int agents_dead; |
121 | u8 use_pkey_index; | ||
122 | u8 already_used; | ||
121 | }; | 123 | }; |
122 | 124 | ||
123 | struct ib_umad_packet { | 125 | struct ib_umad_packet { |
@@ -147,6 +149,12 @@ static void ib_umad_release_dev(struct kref *ref) | |||
147 | kfree(dev); | 149 | kfree(dev); |
148 | } | 150 | } |
149 | 151 | ||
152 | static int hdr_size(struct ib_umad_file *file) | ||
153 | { | ||
154 | return file->use_pkey_index ? sizeof (struct ib_user_mad_hdr) : | ||
155 | sizeof (struct ib_user_mad_hdr_old); | ||
156 | } | ||
157 | |||
150 | /* caller must hold port->mutex at least for reading */ | 158 | /* caller must hold port->mutex at least for reading */ |
151 | static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) | 159 | static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id) |
152 | { | 160 | { |
@@ -221,13 +229,13 @@ static void recv_handler(struct ib_mad_agent *agent, | |||
221 | packet->length = mad_recv_wc->mad_len; | 229 | packet->length = mad_recv_wc->mad_len; |
222 | packet->recv_wc = mad_recv_wc; | 230 | packet->recv_wc = mad_recv_wc; |
223 | 231 | ||
224 | packet->mad.hdr.status = 0; | 232 | packet->mad.hdr.status = 0; |
225 | packet->mad.hdr.length = sizeof (struct ib_user_mad) + | 233 | packet->mad.hdr.length = hdr_size(file) + mad_recv_wc->mad_len; |
226 | mad_recv_wc->mad_len; | 234 | packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); |
227 | packet->mad.hdr.qpn = cpu_to_be32(mad_recv_wc->wc->src_qp); | 235 | packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid); |
228 | packet->mad.hdr.lid = cpu_to_be16(mad_recv_wc->wc->slid); | 236 | packet->mad.hdr.sl = mad_recv_wc->wc->sl; |
229 | packet->mad.hdr.sl = mad_recv_wc->wc->sl; | 237 | packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; |
230 | packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits; | 238 | packet->mad.hdr.pkey_index = mad_recv_wc->wc->pkey_index; |
231 | packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); | 239 | packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH); |
232 | if (packet->mad.hdr.grh_present) { | 240 | if (packet->mad.hdr.grh_present) { |
233 | struct ib_ah_attr ah_attr; | 241 | struct ib_ah_attr ah_attr; |
@@ -253,8 +261,8 @@ err1: | |||
253 | ib_free_recv_mad(mad_recv_wc); | 261 | ib_free_recv_mad(mad_recv_wc); |
254 | } | 262 | } |
255 | 263 | ||
256 | static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet, | 264 | static ssize_t copy_recv_mad(struct ib_umad_file *file, char __user *buf, |
257 | size_t count) | 265 | struct ib_umad_packet *packet, size_t count) |
258 | { | 266 | { |
259 | struct ib_mad_recv_buf *recv_buf; | 267 | struct ib_mad_recv_buf *recv_buf; |
260 | int left, seg_payload, offset, max_seg_payload; | 268 | int left, seg_payload, offset, max_seg_payload; |
@@ -262,15 +270,15 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet, | |||
262 | /* We need enough room to copy the first (or only) MAD segment. */ | 270 | /* We need enough room to copy the first (or only) MAD segment. */ |
263 | recv_buf = &packet->recv_wc->recv_buf; | 271 | recv_buf = &packet->recv_wc->recv_buf; |
264 | if ((packet->length <= sizeof (*recv_buf->mad) && | 272 | if ((packet->length <= sizeof (*recv_buf->mad) && |
265 | count < sizeof (packet->mad) + packet->length) || | 273 | count < hdr_size(file) + packet->length) || |
266 | (packet->length > sizeof (*recv_buf->mad) && | 274 | (packet->length > sizeof (*recv_buf->mad) && |
267 | count < sizeof (packet->mad) + sizeof (*recv_buf->mad))) | 275 | count < hdr_size(file) + sizeof (*recv_buf->mad))) |
268 | return -EINVAL; | 276 | return -EINVAL; |
269 | 277 | ||
270 | if (copy_to_user(buf, &packet->mad, sizeof (packet->mad))) | 278 | if (copy_to_user(buf, &packet->mad, hdr_size(file))) |
271 | return -EFAULT; | 279 | return -EFAULT; |
272 | 280 | ||
273 | buf += sizeof (packet->mad); | 281 | buf += hdr_size(file); |
274 | seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad)); | 282 | seg_payload = min_t(int, packet->length, sizeof (*recv_buf->mad)); |
275 | if (copy_to_user(buf, recv_buf->mad, seg_payload)) | 283 | if (copy_to_user(buf, recv_buf->mad, seg_payload)) |
276 | return -EFAULT; | 284 | return -EFAULT; |
@@ -280,7 +288,7 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet, | |||
280 | * Multipacket RMPP MAD message. Copy remainder of message. | 288 | * Multipacket RMPP MAD message. Copy remainder of message. |
281 | * Note that last segment may have a shorter payload. | 289 | * Note that last segment may have a shorter payload. |
282 | */ | 290 | */ |
283 | if (count < sizeof (packet->mad) + packet->length) { | 291 | if (count < hdr_size(file) + packet->length) { |
284 | /* | 292 | /* |
285 | * The buffer is too small, return the first RMPP segment, | 293 | * The buffer is too small, return the first RMPP segment, |
286 | * which includes the RMPP message length. | 294 | * which includes the RMPP message length. |
@@ -300,18 +308,23 @@ static ssize_t copy_recv_mad(char __user *buf, struct ib_umad_packet *packet, | |||
300 | return -EFAULT; | 308 | return -EFAULT; |
301 | } | 309 | } |
302 | } | 310 | } |
303 | return sizeof (packet->mad) + packet->length; | 311 | return hdr_size(file) + packet->length; |
304 | } | 312 | } |
305 | 313 | ||
306 | static ssize_t copy_send_mad(char __user *buf, struct ib_umad_packet *packet, | 314 | static ssize_t copy_send_mad(struct ib_umad_file *file, char __user *buf, |
307 | size_t count) | 315 | struct ib_umad_packet *packet, size_t count) |
308 | { | 316 | { |
309 | ssize_t size = sizeof (packet->mad) + packet->length; | 317 | ssize_t size = hdr_size(file) + packet->length; |
310 | 318 | ||
311 | if (count < size) | 319 | if (count < size) |
312 | return -EINVAL; | 320 | return -EINVAL; |
313 | 321 | ||
314 | if (copy_to_user(buf, &packet->mad, size)) | 322 | if (copy_to_user(buf, &packet->mad, hdr_size(file))) |
323 | return -EFAULT; | ||
324 | |||
325 | buf += hdr_size(file); | ||
326 | |||
327 | if (copy_to_user(buf, packet->mad.data, packet->length)) | ||
315 | return -EFAULT; | 328 | return -EFAULT; |
316 | 329 | ||
317 | return size; | 330 | return size; |
@@ -324,7 +337,7 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, | |||
324 | struct ib_umad_packet *packet; | 337 | struct ib_umad_packet *packet; |
325 | ssize_t ret; | 338 | ssize_t ret; |
326 | 339 | ||
327 | if (count < sizeof (struct ib_user_mad)) | 340 | if (count < hdr_size(file)) |
328 | return -EINVAL; | 341 | return -EINVAL; |
329 | 342 | ||
330 | spin_lock_irq(&file->recv_lock); | 343 | spin_lock_irq(&file->recv_lock); |
@@ -348,9 +361,9 @@ static ssize_t ib_umad_read(struct file *filp, char __user *buf, | |||
348 | spin_unlock_irq(&file->recv_lock); | 361 | spin_unlock_irq(&file->recv_lock); |
349 | 362 | ||
350 | if (packet->recv_wc) | 363 | if (packet->recv_wc) |
351 | ret = copy_recv_mad(buf, packet, count); | 364 | ret = copy_recv_mad(file, buf, packet, count); |
352 | else | 365 | else |
353 | ret = copy_send_mad(buf, packet, count); | 366 | ret = copy_send_mad(file, buf, packet, count); |
354 | 367 | ||
355 | if (ret < 0) { | 368 | if (ret < 0) { |
356 | /* Requeue packet */ | 369 | /* Requeue packet */ |
@@ -442,15 +455,14 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
442 | __be64 *tid; | 455 | __be64 *tid; |
443 | int ret, data_len, hdr_len, copy_offset, rmpp_active; | 456 | int ret, data_len, hdr_len, copy_offset, rmpp_active; |
444 | 457 | ||
445 | if (count < sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR) | 458 | if (count < hdr_size(file) + IB_MGMT_RMPP_HDR) |
446 | return -EINVAL; | 459 | return -EINVAL; |
447 | 460 | ||
448 | packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL); | 461 | packet = kzalloc(sizeof *packet + IB_MGMT_RMPP_HDR, GFP_KERNEL); |
449 | if (!packet) | 462 | if (!packet) |
450 | return -ENOMEM; | 463 | return -ENOMEM; |
451 | 464 | ||
452 | if (copy_from_user(&packet->mad, buf, | 465 | if (copy_from_user(&packet->mad, buf, hdr_size(file))) { |
453 | sizeof (struct ib_user_mad) + IB_MGMT_RMPP_HDR)) { | ||
454 | ret = -EFAULT; | 466 | ret = -EFAULT; |
455 | goto err; | 467 | goto err; |
456 | } | 468 | } |
@@ -461,6 +473,13 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
461 | goto err; | 473 | goto err; |
462 | } | 474 | } |
463 | 475 | ||
476 | buf += hdr_size(file); | ||
477 | |||
478 | if (copy_from_user(packet->mad.data, buf, IB_MGMT_RMPP_HDR)) { | ||
479 | ret = -EFAULT; | ||
480 | goto err; | ||
481 | } | ||
482 | |||
464 | down_read(&file->port->mutex); | 483 | down_read(&file->port->mutex); |
465 | 484 | ||
466 | agent = __get_agent(file, packet->mad.hdr.id); | 485 | agent = __get_agent(file, packet->mad.hdr.id); |
@@ -500,11 +519,11 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
500 | IB_MGMT_RMPP_FLAG_ACTIVE; | 519 | IB_MGMT_RMPP_FLAG_ACTIVE; |
501 | } | 520 | } |
502 | 521 | ||
503 | data_len = count - sizeof (struct ib_user_mad) - hdr_len; | 522 | data_len = count - hdr_size(file) - hdr_len; |
504 | packet->msg = ib_create_send_mad(agent, | 523 | packet->msg = ib_create_send_mad(agent, |
505 | be32_to_cpu(packet->mad.hdr.qpn), | 524 | be32_to_cpu(packet->mad.hdr.qpn), |
506 | 0, rmpp_active, hdr_len, | 525 | packet->mad.hdr.pkey_index, rmpp_active, |
507 | data_len, GFP_KERNEL); | 526 | hdr_len, data_len, GFP_KERNEL); |
508 | if (IS_ERR(packet->msg)) { | 527 | if (IS_ERR(packet->msg)) { |
509 | ret = PTR_ERR(packet->msg); | 528 | ret = PTR_ERR(packet->msg); |
510 | goto err_ah; | 529 | goto err_ah; |
@@ -517,7 +536,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf, | |||
517 | 536 | ||
518 | /* Copy MAD header. Any RMPP header is already in place. */ | 537 | /* Copy MAD header. Any RMPP header is already in place. */ |
519 | memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); | 538 | memcpy(packet->msg->mad, packet->mad.data, IB_MGMT_MAD_HDR); |
520 | buf += sizeof (struct ib_user_mad); | ||
521 | 539 | ||
522 | if (!rmpp_active) { | 540 | if (!rmpp_active) { |
523 | if (copy_from_user(packet->msg->mad + copy_offset, | 541 | if (copy_from_user(packet->msg->mad + copy_offset, |
@@ -646,6 +664,16 @@ found: | |||
646 | goto out; | 664 | goto out; |
647 | } | 665 | } |
648 | 666 | ||
667 | if (!file->already_used) { | ||
668 | file->already_used = 1; | ||
669 | if (!file->use_pkey_index) { | ||
670 | printk(KERN_WARNING "user_mad: process %s did not enable " | ||
671 | "P_Key index support.\n", current->comm); | ||
672 | printk(KERN_WARNING "user_mad: Documentation/infiniband/user_mad.txt " | ||
673 | "has info on the new ABI.\n"); | ||
674 | } | ||
675 | } | ||
676 | |||
649 | file->agent[agent_id] = agent; | 677 | file->agent[agent_id] = agent; |
650 | ret = 0; | 678 | ret = 0; |
651 | 679 | ||
@@ -682,6 +710,20 @@ out: | |||
682 | return ret; | 710 | return ret; |
683 | } | 711 | } |
684 | 712 | ||
713 | static long ib_umad_enable_pkey(struct ib_umad_file *file) | ||
714 | { | ||
715 | int ret = 0; | ||
716 | |||
717 | down_write(&file->port->mutex); | ||
718 | if (file->already_used) | ||
719 | ret = -EINVAL; | ||
720 | else | ||
721 | file->use_pkey_index = 1; | ||
722 | up_write(&file->port->mutex); | ||
723 | |||
724 | return ret; | ||
725 | } | ||
726 | |||
685 | static long ib_umad_ioctl(struct file *filp, unsigned int cmd, | 727 | static long ib_umad_ioctl(struct file *filp, unsigned int cmd, |
686 | unsigned long arg) | 728 | unsigned long arg) |
687 | { | 729 | { |
@@ -690,6 +732,8 @@ static long ib_umad_ioctl(struct file *filp, unsigned int cmd, | |||
690 | return ib_umad_reg_agent(filp->private_data, arg); | 732 | return ib_umad_reg_agent(filp->private_data, arg); |
691 | case IB_USER_MAD_UNREGISTER_AGENT: | 733 | case IB_USER_MAD_UNREGISTER_AGENT: |
692 | return ib_umad_unreg_agent(filp->private_data, arg); | 734 | return ib_umad_unreg_agent(filp->private_data, arg); |
735 | case IB_USER_MAD_ENABLE_PKEY: | ||
736 | return ib_umad_enable_pkey(filp->private_data); | ||
693 | default: | 737 | default: |
694 | return -ENOIOCTLCMD; | 738 | return -ENOIOCTLCMD; |
695 | } | 739 | } |