Diffstat (limited to 'drivers/infiniband/core/sa_query.c')
-rw-r--r--  drivers/infiniband/core/sa_query.c  224
1 file changed, 182 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index 276e1a53010d..795184931c83 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Voltaire, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses. You may choose to be licensed under the terms of the GNU
@@ -29,7 +30,7 @@
  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  *
- * $Id: sa_query.c 1389 2004-12-27 22:56:47Z roland $
+ * $Id: sa_query.c 2811 2005-07-06 18:11:43Z halr $
  */
 
 #include <linux/module.h>
@@ -50,26 +51,6 @@ MODULE_AUTHOR("Roland Dreier");
 MODULE_DESCRIPTION("InfiniBand subnet administration query support");
 MODULE_LICENSE("Dual BSD/GPL");
 
-/*
- * These two structures must be packed because they have 64-bit fields
- * that are only 32-bit aligned. 64-bit architectures will lay them
- * out wrong otherwise. (And unfortunately they are sent on the wire
- * so we can't change the layout)
- */
-struct ib_sa_hdr {
-	u64			sm_key;
-	u16			attr_offset;
-	u16			reserved;
-	ib_sa_comp_mask		comp_mask;
-} __attribute__ ((packed));
-
-struct ib_sa_mad {
-	struct ib_mad_hdr	mad_hdr;
-	struct ib_rmpp_hdr	rmpp_hdr;
-	struct ib_sa_hdr	sa_hdr;
-	u8			data[200];
-} __attribute__ ((packed));
-
 struct ib_sa_sm_ah {
 	struct ib_ah        *ah;
 	struct kref          ref;
@@ -77,7 +58,6 @@ struct ib_sa_sm_ah {
 
 struct ib_sa_port {
 	struct ib_mad_agent *agent;
-	struct ib_mr        *mr;
 	struct ib_sa_sm_ah  *sm_ah;
 	struct work_struct   update_task;
 	spinlock_t           ah_lock;
@@ -100,6 +80,12 @@ struct ib_sa_query {
 	int                  id;
 };
 
+struct ib_sa_service_query {
+	void (*callback)(int, struct ib_sa_service_rec *, void *);
+	void *context;
+	struct ib_sa_query sa_query;
+};
+
 struct ib_sa_path_query {
 	void (*callback)(int, struct ib_sa_path_rec *, void *);
 	void *context;
@@ -341,6 +327,54 @@ static const struct ib_field mcmember_rec_table[] = {
 	  .size_bits    = 23 },
 };
 
+#define SERVICE_REC_FIELD(field) \
+	.struct_offset_bytes = offsetof(struct ib_sa_service_rec, field),	\
+	.struct_size_bytes   = sizeof ((struct ib_sa_service_rec *) 0)->field,	\
+	.field_name          = "sa_service_rec:" #field
+
+static const struct ib_field service_rec_table[] = {
+	{ SERVICE_REC_FIELD(id),
+	  .offset_words = 0,
+	  .offset_bits  = 0,
+	  .size_bits    = 64 },
+	{ SERVICE_REC_FIELD(gid),
+	  .offset_words = 2,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ SERVICE_REC_FIELD(pkey),
+	  .offset_words = 6,
+	  .offset_bits  = 0,
+	  .size_bits    = 16 },
+	{ SERVICE_REC_FIELD(lease),
+	  .offset_words = 7,
+	  .offset_bits  = 0,
+	  .size_bits    = 32 },
+	{ SERVICE_REC_FIELD(key),
+	  .offset_words = 8,
+	  .offset_bits  = 0,
+	  .size_bits    = 128 },
+	{ SERVICE_REC_FIELD(name),
+	  .offset_words = 12,
+	  .offset_bits  = 0,
+	  .size_bits    = 64*8 },
+	{ SERVICE_REC_FIELD(data8),
+	  .offset_words = 28,
+	  .offset_bits  = 0,
+	  .size_bits    = 16*8 },
+	{ SERVICE_REC_FIELD(data16),
+	  .offset_words = 32,
+	  .offset_bits  = 0,
+	  .size_bits    = 8*16 },
+	{ SERVICE_REC_FIELD(data32),
+	  .offset_words = 36,
+	  .offset_bits  = 0,
+	  .size_bits    = 4*32 },
+	{ SERVICE_REC_FIELD(data64),
+	  .offset_words = 40,
+	  .offset_bits  = 0,
+	  .size_bits    = 2*64 },
+};
+
 static void free_sm_ah(struct kref *kref)
 {
 	struct ib_sa_sm_ah *sm_ah = container_of(kref, struct ib_sa_sm_ah, ref);
@@ -463,7 +497,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms)
 				.mad_hdr     = &query->mad->mad_hdr,
 				.remote_qpn  = 1,
 				.remote_qkey = IB_QP1_QKEY,
-				.timeout_ms  = timeout_ms
+				.timeout_ms  = timeout_ms,
 			}
 		}
 	};
@@ -492,7 +526,7 @@ retry:
 					    sizeof (struct ib_sa_mad),
 					    DMA_TO_DEVICE);
 	gather_list.length = sizeof (struct ib_sa_mad);
-	gather_list.lkey   = port->mr->lkey;
+	gather_list.lkey   = port->agent->mr->lkey;
 	pci_unmap_addr_set(query, mapping, gather_list.addr);
 
 	ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -507,7 +541,13 @@ retry:
 		spin_unlock_irqrestore(&idr_lock, flags);
 	}
 
-	return ret;
+	/*
+	 * It's not safe to dereference query any more, because the
+	 * send may already have completed and freed the query in
+	 * another context. So use wr.wr_id, which has a copy of the
+	 * query's id.
+	 */
+	return ret ? ret : wr.wr_id;
 }
 
 static void ib_sa_path_rec_callback(struct ib_sa_query *sa_query,
@@ -560,7 +600,7 @@ static void ib_sa_path_rec_release(struct ib_sa_query *sa_query)
 int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		       struct ib_sa_path_rec *rec,
 		       ib_sa_comp_mask comp_mask,
-		       int timeout_ms, int gfp_mask,
+		       int timeout_ms, unsigned int __nocast gfp_mask,
 		       void (*callback)(int status,
 					struct ib_sa_path_rec *resp,
 					void *context),
@@ -598,17 +638,126 @@ int ib_sa_path_rec_get(struct ib_device *device, u8 port_num,
 		rec, query->sa_query.mad->data);
 
 	*sa_query = &query->sa_query;
+
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret) {
+	if (ret < 0) {
 		*sa_query = NULL;
 		kfree(query->sa_query.mad);
 		kfree(query);
 	}
 
-	return ret ? ret : query->sa_query.id;
+	return ret;
 }
 EXPORT_SYMBOL(ib_sa_path_rec_get);
 
+static void ib_sa_service_rec_callback(struct ib_sa_query *sa_query,
+				       int status,
+				       struct ib_sa_mad *mad)
+{
+	struct ib_sa_service_query *query =
+		container_of(sa_query, struct ib_sa_service_query, sa_query);
+
+	if (mad) {
+		struct ib_sa_service_rec rec;
+
+		ib_unpack(service_rec_table, ARRAY_SIZE(service_rec_table),
+			  mad->data, &rec);
+		query->callback(status, &rec, query->context);
+	} else
+		query->callback(status, NULL, query->context);
+}
+
+static void ib_sa_service_rec_release(struct ib_sa_query *sa_query)
+{
+	kfree(sa_query->mad);
+	kfree(container_of(sa_query, struct ib_sa_service_query, sa_query));
+}
+
+/**
+ * ib_sa_service_rec_query - Start Service Record operation
+ * @device: device to send request on
+ * @port_num: port number to send request on
+ * @method: SA method - should be get, set, or delete
+ * @rec: Service Record to send in request
+ * @comp_mask: component mask to send in request
+ * @timeout_ms: time to wait for response
+ * @gfp_mask: GFP mask to use for internal allocations
+ * @callback: function called when request completes, times out or is
+ * canceled
+ * @context: opaque user context passed to callback
+ * @sa_query: request context, used to cancel request
+ *
+ * Send a Service Record set/get/delete to the SA to register,
+ * unregister or query a service record.
+ * The callback function will be called when the request completes (or
+ * fails); status is 0 for a successful response, -EINTR if the query
+ * is canceled, -ETIMEDOUT if the query timed out, or -EIO if an error
+ * occurred sending the query. The resp parameter of the callback is
+ * only valid if status is 0.
+ *
+ * If the return value of ib_sa_service_rec_query() is negative, it is an
+ * error code. Otherwise it is a request ID that can be used to cancel
+ * the query.
+ */
+int ib_sa_service_rec_query(struct ib_device *device, u8 port_num, u8 method,
+			    struct ib_sa_service_rec *rec,
+			    ib_sa_comp_mask comp_mask,
+			    int timeout_ms, unsigned int __nocast gfp_mask,
+			    void (*callback)(int status,
+					     struct ib_sa_service_rec *resp,
+					     void *context),
+			    void *context,
+			    struct ib_sa_query **sa_query)
+{
+	struct ib_sa_service_query *query;
+	struct ib_sa_device *sa_dev = ib_get_client_data(device, &sa_client);
+	struct ib_sa_port   *port  = &sa_dev->port[port_num - sa_dev->start_port];
+	struct ib_mad_agent *agent = port->agent;
+	int ret;
+
+	if (method != IB_MGMT_METHOD_GET &&
+	    method != IB_MGMT_METHOD_SET &&
+	    method != IB_SA_METHOD_DELETE)
+		return -EINVAL;
+
+	query = kmalloc(sizeof *query, gfp_mask);
+	if (!query)
+		return -ENOMEM;
+	query->sa_query.mad = kmalloc(sizeof *query->sa_query.mad, gfp_mask);
+	if (!query->sa_query.mad) {
+		kfree(query);
+		return -ENOMEM;
+	}
+
+	query->callback = callback;
+	query->context  = context;
+
+	init_mad(query->sa_query.mad, agent);
+
+	query->sa_query.callback = callback ? ib_sa_service_rec_callback : NULL;
+	query->sa_query.release  = ib_sa_service_rec_release;
+	query->sa_query.port     = port;
+	query->sa_query.mad->mad_hdr.method  = method;
+	query->sa_query.mad->mad_hdr.attr_id =
+		cpu_to_be16(IB_SA_ATTR_SERVICE_REC);
+	query->sa_query.mad->sa_hdr.comp_mask = comp_mask;
+
+	ib_pack(service_rec_table, ARRAY_SIZE(service_rec_table),
+		rec, query->sa_query.mad->data);
+
+	*sa_query = &query->sa_query;
+
+	ret = send_mad(&query->sa_query, timeout_ms);
+	if (ret < 0) {
+		*sa_query = NULL;
+		kfree(query->sa_query.mad);
+		kfree(query);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(ib_sa_service_rec_query);
+
 static void ib_sa_mcmember_rec_callback(struct ib_sa_query *sa_query,
 					int status,
 					struct ib_sa_mad *mad)
@@ -636,7 +785,7 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 			     u8 method,
 			     struct ib_sa_mcmember_rec *rec,
 			     ib_sa_comp_mask comp_mask,
-			     int timeout_ms, int gfp_mask,
+			     int timeout_ms, unsigned int __nocast gfp_mask,
 			     void (*callback)(int status,
 					      struct ib_sa_mcmember_rec *resp,
 					      void *context),
@@ -674,14 +823,15 @@ int ib_sa_mcmember_rec_query(struct ib_device *device, u8 port_num,
 		rec, query->sa_query.mad->data);
 
 	*sa_query = &query->sa_query;
+
 	ret = send_mad(&query->sa_query, timeout_ms);
-	if (ret) {
+	if (ret < 0) {
 		*sa_query = NULL;
 		kfree(query->sa_query.mad);
 		kfree(query);
 	}
 
-	return ret ? ret : query->sa_query.id;
+	return ret;
 }
 EXPORT_SYMBOL(ib_sa_mcmember_rec_query);
 
@@ -772,7 +922,6 @@ static void ib_sa_add_one(struct ib_device *device)
 	sa_dev->end_port   = e;
 
 	for (i = 0; i <= e - s; ++i) {
-		sa_dev->port[i].mr       = NULL;
 		sa_dev->port[i].sm_ah    = NULL;
 		sa_dev->port[i].port_num = i + s;
 		spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -784,13 +933,6 @@ static void ib_sa_add_one(struct ib_device *device)
 		if (IS_ERR(sa_dev->port[i].agent))
 			goto err;
 
-		sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
-						   IB_ACCESS_LOCAL_WRITE);
-		if (IS_ERR(sa_dev->port[i].mr)) {
-			ib_unregister_mad_agent(sa_dev->port[i].agent);
-			goto err;
-		}
-
 		INIT_WORK(&sa_dev->port[i].update_task,
 			  update_sm_ah, &sa_dev->port[i]);
 	}
@@ -814,10 +956,8 @@ static void ib_sa_add_one(struct ib_device *device)
 	return;
 
 err:
-	while (--i >= 0) {
-		ib_dereg_mr(sa_dev->port[i].mr);
+	while (--i >= 0)
 		ib_unregister_mad_agent(sa_dev->port[i].agent);
-	}
 
 	kfree(sa_dev);
 
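For reference, a minimal caller sketch of the new service-record entry point added above. This is not part of the patch: the example_* names are invented, and the component-mask bits (IB_SA_SERVICE_REC_SERVICE_ID, IB_SA_SERVICE_REC_SERVICE_NAME) are assumed to be defined by the companion ib_sa.h change in this series; only ib_sa_service_rec_query() itself appears in this diff.

/*
 * Hypothetical caller sketch: register a service with the SA via the
 * new ib_sa_service_rec_query(). Field names follow service_rec_table
 * above; comp-mask constants are assumed from ib_sa.h.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <ib_sa.h>

static struct ib_sa_query *example_query;

static void example_service_cb(int status, struct ib_sa_service_rec *resp,
			       void *context)
{
	/*
	 * status is 0 on success, -EINTR if canceled, -ETIMEDOUT on
	 * timeout, -EIO on a send error; resp is valid only when
	 * status == 0.
	 */
	if (!status)
		printk(KERN_INFO "registered service id 0x%llx\n",
		       (unsigned long long) resp->id);
}

static int example_register_service(struct ib_device *device, u8 port_num)
{
	struct ib_sa_service_rec rec;
	int id;

	memset(&rec, 0, sizeof rec);
	rec.id = 0x1234;	/* arbitrary example service ID */
	memcpy(rec.name, "example-svc", sizeof "example-svc");

	id = ib_sa_service_rec_query(device, port_num, IB_MGMT_METHOD_SET,
				     &rec,
				     IB_SA_SERVICE_REC_SERVICE_ID |
				     IB_SA_SERVICE_REC_SERVICE_NAME,
				     1000, GFP_KERNEL,
				     example_service_cb, NULL,
				     &example_query);
	return id < 0 ? id : 0;	/* nonnegative: request ID was assigned */
}

Per the kernel-doc above, the callback fires exactly once with the final status, and IB_SA_METHOD_DELETE with the same record would unregister the service.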
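The send_mad() change also reworks the return contract: send_mad() now returns wr.wr_id (a copy of the query ID) on success, so the exported query functions pass that through instead of re-reading query->sa_query.id after posting, which could race with a completion that has already freed the query. A sketch of how a caller consumes that ID, assuming the existing ib_sa_cancel_query() and IB_SA_PATH_REC_* bits from ib_sa.h; the example_* names are again invented.

/*
 * Hypothetical caller sketch of the revised return convention: a
 * negative return is an errno, a nonnegative return is the request ID.
 */
#include <linux/slab.h>
#include <ib_sa.h>

static struct ib_sa_query *example_path_query;

static void example_path_cb(int status, struct ib_sa_path_rec *resp,
			    void *context)
{
	/* A canceled request still completes here, with status == -EINTR. */
}

static void example_get_then_cancel(struct ib_device *device, u8 port_num,
				    struct ib_sa_path_rec *rec)
{
	int id = ib_sa_path_rec_get(device, port_num, rec,
				    IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID,
				    1000, GFP_KERNEL, example_path_cb,
				    NULL, &example_path_query);
	if (id < 0)
		return;		/* send failed; no request is outstanding */

	/* The ID returned above names the request for cancellation. */
	ib_sa_cancel_query(id, example_path_query);
}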