author    Bryan O'Sullivan <bos@pathscale.com>    2006-07-01 07:35:58 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-07-01 12:55:59 -0400
commit    fe62546a6afa141c4ab9aef65f5978a1b36cb523 (patch)
tree      44b317774fc0db45e90c394a1575bb19d08c00bb
parent    e8a88f09f21c55a7e7f570290ecde570e2c37771 (diff)
[PATCH] IB/ipath: enforce device resource limits
These limits are somewhat artificial in that we don't actually have any
device limits.  However, the verbs layer expects that such limits exist
and are enforced, so we make up arbitrary (but sensible) limits.

Signed-off-by: Robert Walsh <robert.walsh@qlogic.com>
Signed-off-by: Bryan O'Sullivan <bryan.osullivan@qlogic.com>
Cc: "Michael S. Tsirkin" <mst@mellanox.co.il>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
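[Editor's note: every hunk below applies the same pattern: each verbs object
type (PD, AH, CQ, SRQ, multicast group) gains a per-device counter that is
compared against a module-parameter limit before allocation and decremented
on destroy.  A minimal sketch of the idea for the PD case, using a
hypothetical abbreviated helper (the real ipath_alloc_pd() below also
handles udata and initializes pd->user):

static struct ib_pd *example_alloc_pd(struct ipath_ibdev *dev)
{
	struct ipath_pd *pd;

	/* Refuse the allocation once the made-up limit is hit. */
	if (dev->n_pds_allocated == ib_ipath_max_pds)
		return ERR_PTR(-ENOMEM);

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	/* Balanced by n_pds_allocated-- in the dealloc path. */
	dev->n_pds_allocated++;

	return &pd->ibpd;
}]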
 drivers/infiniband/hw/ipath/ipath_cq.c          |  15
 drivers/infiniband/hw/ipath/ipath_qp.c          |   6
 drivers/infiniband/hw/ipath/ipath_srq.c         |  47
 drivers/infiniband/hw/ipath/ipath_verbs.c       | 109
 drivers/infiniband/hw/ipath/ipath_verbs.h       |  24
 drivers/infiniband/hw/ipath/ipath_verbs_mcast.c |  36
 6 files changed, 204 insertions(+), 33 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 392eeb394637..3efee341c9bc 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -158,10 +158,21 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 			      struct ib_ucontext *context,
 			      struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibdev);
 	struct ipath_cq *cq;
 	struct ib_wc *wc;
 	struct ib_cq *ret;
 
+	if (entries > ib_ipath_max_cqes) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	if (dev->n_cqs_allocated == ib_ipath_max_cqs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	/*
 	 * Need to use vmalloc() if we want to support large #s of
 	 * entries.
@@ -197,6 +208,8 @@ struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
 
 	ret = &cq->ibcq;
 
+	dev->n_cqs_allocated++;
+
 bail:
 	return ret;
 }
@@ -211,9 +224,11 @@ bail:
  */
 int ipath_destroy_cq(struct ib_cq *ibcq)
 {
+	struct ipath_ibdev *dev = to_idev(ibcq->device);
 	struct ipath_cq *cq = to_icq(ibcq);
 
 	tasklet_kill(&cq->comptask);
+	dev->n_cqs_allocated--;
 	vfree(cq->queue);
 	kfree(cq);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_qp.c b/drivers/infiniband/hw/ipath/ipath_qp.c
index fd4d55bdee78..83b9a6a5e2c6 100644
--- a/drivers/infiniband/hw/ipath/ipath_qp.c
+++ b/drivers/infiniband/hw/ipath/ipath_qp.c
@@ -661,8 +661,10 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
 	size_t sz;
 	struct ib_qp *ret;
 
-	if (init_attr->cap.max_send_sge > 255 ||
-	    init_attr->cap.max_recv_sge > 255) {
+	if (init_attr->cap.max_send_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_recv_sge > ib_ipath_max_sges ||
+	    init_attr->cap.max_send_wr > ib_ipath_max_qp_wrs ||
+	    init_attr->cap.max_recv_wr > ib_ipath_max_qp_wrs) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 84db5765998e..f760434660bd 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -126,11 +126,23 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 				struct ib_srq_init_attr *srq_init_attr,
 				struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibpd->device);
 	struct ipath_srq *srq;
 	u32 sz;
 	struct ib_srq *ret;
 
-	if (srq_init_attr->attr.max_sge < 1) {
+	if (dev->n_srqs_allocated == ib_ipath_max_srqs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
+	if (srq_init_attr->attr.max_wr == 0) {
+		ret = ERR_PTR(-EINVAL);
+		goto bail;
+	}
+
+	if ((srq_init_attr->attr.max_sge > ib_ipath_max_srq_sges) ||
+	    (srq_init_attr->attr.max_wr > ib_ipath_max_srq_wrs)) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
 	}
@@ -165,6 +177,8 @@ struct ib_srq *ipath_create_srq(struct ib_pd *ibpd,
 
 	ret = &srq->ibsrq;
 
+	dev->n_srqs_allocated++;
+
 bail:
 	return ret;
 }
@@ -182,24 +196,26 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 	unsigned long flags;
 	int ret;
 
-	if (attr_mask & IB_SRQ_LIMIT) {
-		spin_lock_irqsave(&srq->rq.lock, flags);
-		srq->limit = attr->srq_limit;
-		spin_unlock_irqrestore(&srq->rq.lock, flags);
-	}
-	if (attr_mask & IB_SRQ_MAX_WR) {
-		u32 size = attr->max_wr + 1;
-		struct ipath_rwqe *wq, *p;
-		u32 n;
-		u32 sz;
+	if (attr_mask & IB_SRQ_MAX_WR)
+		if ((attr->max_wr > ib_ipath_max_srq_wrs) ||
+		    (attr->max_sge > srq->rq.max_sge)) {
+			ret = -EINVAL;
+			goto bail;
+		}
 
-		if (attr->max_sge < srq->rq.max_sge) {
+	if (attr_mask & IB_SRQ_LIMIT)
+		if (attr->srq_limit >= srq->rq.size) {
 			ret = -EINVAL;
 			goto bail;
 		}
 
+	if (attr_mask & IB_SRQ_MAX_WR) {
+		struct ipath_rwqe *wq, *p;
+		u32 sz, size, n;
+
 		sz = sizeof(struct ipath_rwqe) +
 			attr->max_sge * sizeof(struct ipath_sge);
+		size = attr->max_wr + 1;
 		wq = vmalloc(size * sz);
 		if (!wq) {
 			ret = -ENOMEM;
@@ -243,6 +259,11 @@ int ipath_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		spin_unlock_irqrestore(&srq->rq.lock, flags);
 	}
 
+	if (attr_mask & IB_SRQ_LIMIT) {
+		spin_lock_irqsave(&srq->rq.lock, flags);
+		srq->limit = attr->srq_limit;
+		spin_unlock_irqrestore(&srq->rq.lock, flags);
+	}
 	ret = 0;
 
 bail:
@@ -266,7 +287,9 @@ int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
 int ipath_destroy_srq(struct ib_srq *ibsrq)
 {
 	struct ipath_srq *srq = to_isrq(ibsrq);
+	struct ipath_ibdev *dev = to_idev(ibsrq->device);
 
+	dev->n_srqs_allocated--;
 	vfree(srq->rq.wq);
 	kfree(srq);
 
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index e04c7619f9fe..70547d1f5908 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -56,6 +56,59 @@ unsigned int ib_ipath_debug; /* debug mask */
 module_param_named(debug, ib_ipath_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(debug, "Verbs debug mask");
 
+static unsigned int ib_ipath_max_pds = 0xFFFF;
+module_param_named(max_pds, ib_ipath_max_pds, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_pds,
+		 "Maximum number of protection domains to support");
+
+static unsigned int ib_ipath_max_ahs = 0xFFFF;
+module_param_named(max_ahs, ib_ipath_max_ahs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support");
+
+unsigned int ib_ipath_max_cqes = 0x2FFFF;
+module_param_named(max_cqes, ib_ipath_max_cqes, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_cqes,
+		 "Maximum number of completion queue entries to support");
+
+unsigned int ib_ipath_max_cqs = 0x1FFFF;
+module_param_named(max_cqs, ib_ipath_max_cqs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support");
+
+unsigned int ib_ipath_max_qp_wrs = 0x3FFF;
+module_param_named(max_qp_wrs, ib_ipath_max_qp_wrs, uint,
+		   S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support");
+
+unsigned int ib_ipath_max_sges = 0x60;
+module_param_named(max_sges, ib_ipath_max_sges, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support");
+
+unsigned int ib_ipath_max_mcast_grps = 16384;
+module_param_named(max_mcast_grps, ib_ipath_max_mcast_grps, uint,
+		   S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_mcast_grps,
+		 "Maximum number of multicast groups to support");
+
+unsigned int ib_ipath_max_mcast_qp_attached = 16;
+module_param_named(max_mcast_qp_attached, ib_ipath_max_mcast_qp_attached,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_mcast_qp_attached,
+		 "Maximum number of attached QPs to support");
+
+unsigned int ib_ipath_max_srqs = 1024;
+module_param_named(max_srqs, ib_ipath_max_srqs, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support");
+
+unsigned int ib_ipath_max_srq_sges = 128;
+module_param_named(max_srq_sges, ib_ipath_max_srq_sges,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support");
+
+unsigned int ib_ipath_max_srq_wrs = 0x1FFFF;
+module_param_named(max_srq_wrs, ib_ipath_max_srq_wrs,
+		   uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs to support");
+
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("QLogic <support@pathscale.com>");
 MODULE_DESCRIPTION("QLogic InfiniPath driver");
@@ -581,24 +634,25 @@ static int ipath_query_device(struct ib_device *ibdev,
 	props->sys_image_guid = dev->sys_image_guid;
 
 	props->max_mr_size = ~0ull;
-	props->max_qp = 0xffff;
-	props->max_qp_wr = 0xffff;
-	props->max_sge = 255;
-	props->max_cq = 0xffff;
-	props->max_cqe = 0xffff;
-	props->max_mr = 0xffff;
-	props->max_pd = 0xffff;
+	props->max_qp = dev->qp_table.max;
+	props->max_qp_wr = ib_ipath_max_qp_wrs;
+	props->max_sge = ib_ipath_max_sges;
+	props->max_cq = ib_ipath_max_cqs;
+	props->max_ah = ib_ipath_max_ahs;
+	props->max_cqe = ib_ipath_max_cqes;
+	props->max_mr = dev->lk_table.max;
+	props->max_pd = ib_ipath_max_pds;
 	props->max_qp_rd_atom = 1;
 	props->max_qp_init_rd_atom = 1;
 	/* props->max_res_rd_atom */
-	props->max_srq = 0xffff;
-	props->max_srq_wr = 0xffff;
-	props->max_srq_sge = 255;
+	props->max_srq = ib_ipath_max_srqs;
+	props->max_srq_wr = ib_ipath_max_srq_wrs;
+	props->max_srq_sge = ib_ipath_max_srq_sges;
 	/* props->local_ca_ack_delay */
 	props->atomic_cap = IB_ATOMIC_HCA;
 	props->max_pkeys = ipath_layer_get_npkeys(dev->dd);
-	props->max_mcast_grp = 0xffff;
-	props->max_mcast_qp_attach = 0xffff;
+	props->max_mcast_grp = ib_ipath_max_mcast_grps;
+	props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
 	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
 		props->max_mcast_grp;
 
@@ -741,15 +795,30 @@ static struct ib_pd *ipath_alloc_pd(struct ib_device *ibdev,
 				struct ib_ucontext *context,
 				struct ib_udata *udata)
 {
+	struct ipath_ibdev *dev = to_idev(ibdev);
 	struct ipath_pd *pd;
 	struct ib_pd *ret;
 
+	/*
+	 * This is actually totally arbitrary.  Some correctness tests
+	 * assume there's a maximum number of PDs that can be allocated.
+	 * We don't actually have this limit, but we fail the test if
+	 * we allow allocations of more than we report for this value.
+	 */
+
+	if (dev->n_pds_allocated == ib_ipath_max_pds) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
+
 	pd = kmalloc(sizeof *pd, GFP_KERNEL);
 	if (!pd) {
 		ret = ERR_PTR(-ENOMEM);
 		goto bail;
 	}
 
+	dev->n_pds_allocated++;
+
 	/* ib_alloc_pd() will initialize pd->ibpd. */
 	pd->user = udata != NULL;
 
@@ -762,6 +831,9 @@ bail:
 static int ipath_dealloc_pd(struct ib_pd *ibpd)
 {
 	struct ipath_pd *pd = to_ipd(ibpd);
+	struct ipath_ibdev *dev = to_idev(ibpd->device);
+
+	dev->n_pds_allocated--;
 
 	kfree(pd);
 
@@ -780,6 +852,12 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 {
 	struct ipath_ah *ah;
 	struct ib_ah *ret;
+	struct ipath_ibdev *dev = to_idev(pd->device);
+
+	if (dev->n_ahs_allocated == ib_ipath_max_ahs) {
+		ret = ERR_PTR(-ENOMEM);
+		goto bail;
+	}
 
 	/* A multicast address requires a GRH (see ch. 8.4.1). */
 	if (ah_attr->dlid >= IPS_MULTICAST_LID_BASE &&
@@ -794,7 +872,7 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 		goto bail;
 	}
 
-	if (ah_attr->port_num != 1 ||
+	if (ah_attr->port_num < 1 ||
 	    ah_attr->port_num > pd->device->phys_port_cnt) {
 		ret = ERR_PTR(-EINVAL);
 		goto bail;
@@ -806,6 +884,8 @@ static struct ib_ah *ipath_create_ah(struct ib_pd *pd,
 		goto bail;
 	}
 
+	dev->n_ahs_allocated++;
+
 	/* ib_create_ah() will initialize ah->ibah. */
 	ah->attr = *ah_attr;
 
@@ -823,8 +903,11 @@ bail:
  */
 static int ipath_destroy_ah(struct ib_ah *ibah)
 {
+	struct ipath_ibdev *dev = to_idev(ibah->device);
 	struct ipath_ah *ah = to_iah(ibah);
 
+	dev->n_ahs_allocated--;
+
 	kfree(ah);
 
 	return 0;
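[Editor's note: every limit above is registered with module_param_named()
and mode S_IWUSR | S_IRUGO, so each can be set at load time (for example,
modprobe ib_ipath max_srqs=2048, assuming the usual ib_ipath module name)
and, being root-writable, adjusted later through
/sys/module/ib_ipath/parameters/.  Since the checks read the variables at
call time, lowering a limit only affects future allocations.]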
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.h b/drivers/infiniband/hw/ipath/ipath_verbs.h
index b461316ca223..c57058ff5763 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.h
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.h
@@ -149,6 +149,7 @@ struct ipath_mcast {
 	struct list_head qp_list;
 	wait_queue_head_t wait;
 	atomic_t refcount;
+	int n_attached;
 };
 
 /* Memory region */
@@ -432,6 +433,11 @@ struct ipath_ibdev {
 	__be64 sys_image_guid;	/* in network order */
 	__be64 gid_prefix;	/* in network order */
 	__be64 mkey;
+	u32 n_pds_allocated;	/* number of PDs allocated for device */
+	u32 n_ahs_allocated;	/* number of AHs allocated for device */
+	u32 n_cqs_allocated;	/* number of CQs allocated for device */
+	u32 n_srqs_allocated;	/* number of SRQs allocated for device */
+	u32 n_mcast_grps_allocated; /* number of mcast groups allocated */
 	u64 ipath_sword;	/* total dwords sent (sample result) */
 	u64 ipath_rword;	/* total dwords received (sample result) */
 	u64 ipath_spkts;	/* total packets sent (sample result) */
@@ -697,6 +703,24 @@ extern const int ib_ipath_state_ops[];
 
 extern unsigned int ib_ipath_lkey_table_size;
 
+extern unsigned int ib_ipath_max_cqes;
+
+extern unsigned int ib_ipath_max_cqs;
+
+extern unsigned int ib_ipath_max_qp_wrs;
+
+extern unsigned int ib_ipath_max_sges;
+
+extern unsigned int ib_ipath_max_mcast_grps;
+
+extern unsigned int ib_ipath_max_mcast_qp_attached;
+
+extern unsigned int ib_ipath_max_srqs;
+
+extern unsigned int ib_ipath_max_srq_sges;
+
+extern unsigned int ib_ipath_max_srq_wrs;
+
 extern const u32 ib_ipath_rnr_table[];
 
 #endif /* IPATH_VERBS_H */
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
index 996bbb05c396..ee0e1d96d723 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -93,6 +93,7 @@ static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
 	INIT_LIST_HEAD(&mcast->qp_list);
 	init_waitqueue_head(&mcast->wait);
 	atomic_set(&mcast->refcount, 0);
+	mcast->n_attached = 0;
 
 bail:
 	return mcast;
@@ -158,7 +159,8 @@ bail:
  * the table but the QP was added.  Return ESRCH if the QP was already
  * attached and neither structure was added.
  */
-static int ipath_mcast_add(struct ipath_mcast *mcast,
+static int ipath_mcast_add(struct ipath_ibdev *dev,
+			   struct ipath_mcast *mcast,
 			   struct ipath_mcast_qp *mqp)
 {
 	struct rb_node **n = &mcast_tree.rb_node;
@@ -189,34 +191,47 @@ static int ipath_mcast_add(struct ipath_mcast *mcast,
 		/* Search the QP list to see if this is already there. */
 		list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
 			if (p->qp == mqp->qp) {
-				spin_unlock_irqrestore(&mcast_lock, flags);
 				ret = ESRCH;
 				goto bail;
 			}
 		}
+		if (tmcast->n_attached == ib_ipath_max_mcast_qp_attached) {
+			ret = ENOMEM;
+			goto bail;
+		}
+
+		tmcast->n_attached++;
+
 		list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
-		spin_unlock_irqrestore(&mcast_lock, flags);
 		ret = EEXIST;
 		goto bail;
 	}
 
+	if (dev->n_mcast_grps_allocated == ib_ipath_max_mcast_grps) {
+		ret = ENOMEM;
+		goto bail;
+	}
+
+	dev->n_mcast_grps_allocated++;
+
 	list_add_tail_rcu(&mqp->list, &mcast->qp_list);
 
 	atomic_inc(&mcast->refcount);
 	rb_link_node(&mcast->rb_node, pn, n);
 	rb_insert_color(&mcast->rb_node, &mcast_tree);
 
-	spin_unlock_irqrestore(&mcast_lock, flags);
-
 	ret = 0;
 
 bail:
+	spin_unlock_irqrestore(&mcast_lock, flags);
+
 	return ret;
 }
 
 int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_mcast *mcast;
 	struct ipath_mcast_qp *mqp;
 	int ret;
@@ -236,7 +251,7 @@ int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		ret = -ENOMEM;
 		goto bail;
 	}
-	switch (ipath_mcast_add(mcast, mqp)) {
+	switch (ipath_mcast_add(dev, mcast, mqp)) {
 	case ESRCH:
 		/* Neither was used: can't attach the same QP twice. */
 		ipath_mcast_qp_free(mqp);
@@ -246,6 +261,12 @@ int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 	case EEXIST:		/* The mcast wasn't used */
 		ipath_mcast_free(mcast);
 		break;
+	case ENOMEM:
+		/* Exceeded the maximum number of mcast groups. */
+		ipath_mcast_qp_free(mqp);
+		ipath_mcast_free(mcast);
+		ret = -ENOMEM;
+		goto bail;
 	default:
 		break;
 	}
@@ -259,6 +280,7 @@ bail:
 int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
 	struct ipath_qp *qp = to_iqp(ibqp);
+	struct ipath_ibdev *dev = to_idev(ibqp->device);
 	struct ipath_mcast *mcast = NULL;
 	struct ipath_mcast_qp *p, *tmp;
 	struct rb_node *n;
@@ -297,6 +319,7 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 			 * link until we are sure there are no list walkers.
 			 */
 			list_del_rcu(&p->list);
+			mcast->n_attached--;
 
 			/* If this was the last attached QP, remove the GID too. */
 			if (list_empty(&mcast->qp_list)) {
@@ -320,6 +343,7 @@ int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 		atomic_dec(&mcast->refcount);
 		wait_event(mcast->wait, !atomic_read(&mcast->refcount));
 		ipath_mcast_free(mcast);
+		dev->n_mcast_grps_allocated--;
 	}
 
 	ret = 0;
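
[Editor's note: with ipath_query_device() now reporting these (admittedly
made-up) limits, a well-behaved verbs consumer can size its requests from
the device attributes rather than assuming hardware constants.  A minimal
consumer-side sketch against the verbs API of this era (the helper name is
hypothetical):

/* #include <rdma/ib_verbs.h> */
static int example_check_cq_entries(struct ib_device *ibdev, int entries)
{
	struct ib_device_attr attr;
	int ret;

	/* Fetch the limits the driver reports, e.g. ib_ipath_max_cqes. */
	ret = ib_query_device(ibdev, &attr);
	if (ret)
		return ret;

	if (entries > attr.max_cqe)
		return -EINVAL;	/* same errno ipath_create_cq() returns */

	return 0;
}]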