author     Dmitry Torokhov <dtor_core@ameritech.net>  2006-04-29 01:11:23 -0400
committer  Dmitry Torokhov <dtor_core@ameritech.net>  2006-04-29 01:11:23 -0400
commit     7b7e394185014e0f3bd8989cac937003f20ef9ce
tree       3beda5f979bba0aa9822534e239cf1b45f3be69c /drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
parent     ddc5d3414593e4d7ad7fbd33e7f7517fcc234544
parent     693f7d362055261882659475d2ef022e32edbff1
Merge rsync://rsync.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_verbs_mcast.c')
-rw-r--r--   drivers/infiniband/hw/ipath/ipath_verbs_mcast.c   333
1 file changed, 333 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
new file mode 100644
index 000000000000..10b31d2c4f20
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_verbs_mcast.c
@@ -0,0 +1,333 @@
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/rcupdate.h>

#include "ipath_verbs.h"

/*
 * Global table of GID to attached QPs.
 * The table is global to all ipath devices since a send from one QP/device
 * needs to be locally routed to any locally attached QPs on the same
 * or different device.
 */
static struct rb_root mcast_tree;
static DEFINE_SPINLOCK(mcast_lock);

/**
 * ipath_mcast_qp_alloc - alloc a struct to link a QP to mcast GID struct
 * @qp: the QP to link
 */
static struct ipath_mcast_qp *ipath_mcast_qp_alloc(struct ipath_qp *qp)
{
        struct ipath_mcast_qp *mqp;

        mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
        if (!mqp)
                goto bail;

        mqp->qp = qp;
        atomic_inc(&qp->refcount);

bail:
        return mqp;
}

static void ipath_mcast_qp_free(struct ipath_mcast_qp *mqp)
{
        struct ipath_qp *qp = mqp->qp;

        /* Notify ipath_destroy_qp() if it is waiting. */
        if (atomic_dec_and_test(&qp->refcount))
                wake_up(&qp->wait);

        kfree(mqp);
}

/**
 * ipath_mcast_alloc - allocate the multicast GID structure
 * @mgid: the multicast GID
 *
 * A list of QPs will be attached to this structure.
 */
static struct ipath_mcast *ipath_mcast_alloc(union ib_gid *mgid)
{
        struct ipath_mcast *mcast;

        mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
        if (!mcast)
                goto bail;

        mcast->mgid = *mgid;
        INIT_LIST_HEAD(&mcast->qp_list);
        init_waitqueue_head(&mcast->wait);
        atomic_set(&mcast->refcount, 0);

bail:
        return mcast;
}

static void ipath_mcast_free(struct ipath_mcast *mcast)
{
        struct ipath_mcast_qp *p, *tmp;

        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list)
                ipath_mcast_qp_free(p);

        kfree(mcast);
}

/**
 * ipath_mcast_find - search the global table for the given multicast GID
 * @mgid: the multicast GID to search for
 *
 * Returns NULL if not found.
 *
 * The caller is responsible for decrementing the reference count if found.
 */
struct ipath_mcast *ipath_mcast_find(union ib_gid *mgid)
{
        struct rb_node *n;
        unsigned long flags;
        struct ipath_mcast *mcast;

        spin_lock_irqsave(&mcast_lock, flags);
        n = mcast_tree.rb_node;
        while (n) {
                int ret;

                mcast = rb_entry(n, struct ipath_mcast, rb_node);

                ret = memcmp(mgid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else {
                        atomic_inc(&mcast->refcount);
                        spin_unlock_irqrestore(&mcast_lock, flags);
                        goto bail;
                }
        }
        spin_unlock_irqrestore(&mcast_lock, flags);

        mcast = NULL;

bail:
        return mcast;
}

/**
 * ipath_mcast_add - insert mcast GID into table and attach QP struct
 * @mcast: the mcast GID table
 * @mqp: the QP to attach
 *
 * Return zero if both were added.  Return EEXIST if the GID was already in
 * the table but the QP was added.  Return ESRCH if the QP was already
 * attached and neither structure was added.
 */
static int ipath_mcast_add(struct ipath_mcast *mcast,
                           struct ipath_mcast_qp *mqp)
{
        struct rb_node **n = &mcast_tree.rb_node;
        struct rb_node *pn = NULL;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&mcast_lock, flags);

        while (*n) {
                struct ipath_mcast *tmcast;
                struct ipath_mcast_qp *p;

                pn = *n;
                tmcast = rb_entry(pn, struct ipath_mcast, rb_node);

                ret = memcmp(mcast->mgid.raw, tmcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0) {
                        n = &pn->rb_left;
                        continue;
                }
                if (ret > 0) {
                        n = &pn->rb_right;
                        continue;
                }

                /* Search the QP list to see if this is already there. */
                list_for_each_entry_rcu(p, &tmcast->qp_list, list) {
                        if (p->qp == mqp->qp) {
                                spin_unlock_irqrestore(&mcast_lock, flags);
                                ret = ESRCH;
                                goto bail;
                        }
                }
                list_add_tail_rcu(&mqp->list, &tmcast->qp_list);
                spin_unlock_irqrestore(&mcast_lock, flags);
                ret = EEXIST;
                goto bail;
        }

        list_add_tail_rcu(&mqp->list, &mcast->qp_list);

        atomic_inc(&mcast->refcount);
        rb_link_node(&mcast->rb_node, pn, n);
        rb_insert_color(&mcast->rb_node, &mcast_tree);

        spin_unlock_irqrestore(&mcast_lock, flags);

        ret = 0;

bail:
        return ret;
}

int ipath_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_mcast *mcast;
        struct ipath_mcast_qp *mqp;
        int ret;

        /*
         * Allocate data structures since its better to do this outside of
         * spin locks and it will most likely be needed.
         */
        mcast = ipath_mcast_alloc(gid);
        if (mcast == NULL) {
                ret = -ENOMEM;
                goto bail;
        }
        mqp = ipath_mcast_qp_alloc(qp);
        if (mqp == NULL) {
                ipath_mcast_free(mcast);
                ret = -ENOMEM;
                goto bail;
        }
        switch (ipath_mcast_add(mcast, mqp)) {
        case ESRCH:
                /* Neither was used: can't attach the same QP twice. */
                ipath_mcast_qp_free(mqp);
                ipath_mcast_free(mcast);
                ret = -EINVAL;
                goto bail;
        case EEXIST:            /* The mcast wasn't used */
                ipath_mcast_free(mcast);
                break;
        default:
                break;
        }

        ret = 0;

bail:
        return ret;
}

int ipath_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        struct ipath_qp *qp = to_iqp(ibqp);
        struct ipath_mcast *mcast = NULL;
        struct ipath_mcast_qp *p, *tmp;
        struct rb_node *n;
        unsigned long flags;
        int last = 0;
        int ret;

        spin_lock_irqsave(&mcast_lock, flags);

        /* Find the GID in the mcast table. */
        n = mcast_tree.rb_node;
        while (1) {
                if (n == NULL) {
                        spin_unlock_irqrestore(&mcast_lock, flags);
                        ret = 0;
                        goto bail;
                }

                mcast = rb_entry(n, struct ipath_mcast, rb_node);
                ret = memcmp(gid->raw, mcast->mgid.raw,
                             sizeof(union ib_gid));
                if (ret < 0)
                        n = n->rb_left;
                else if (ret > 0)
                        n = n->rb_right;
                else
                        break;
        }

        /* Search the QP list. */
        list_for_each_entry_safe(p, tmp, &mcast->qp_list, list) {
                if (p->qp != qp)
                        continue;
                /*
                 * We found it, so remove it, but don't poison the forward
                 * link until we are sure there are no list walkers.
                 */
                list_del_rcu(&p->list);

                /* If this was the last attached QP, remove the GID too. */
                if (list_empty(&mcast->qp_list)) {
                        rb_erase(&mcast->rb_node, &mcast_tree);
                        last = 1;
                }
                break;
        }

        spin_unlock_irqrestore(&mcast_lock, flags);

        if (p) {
                /*
                 * Wait for any list walkers to finish before freeing the
                 * list element.
                 */
                wait_event(mcast->wait, atomic_read(&mcast->refcount) <= 1);
                ipath_mcast_qp_free(p);
        }
        if (last) {
                atomic_dec(&mcast->refcount);
                wait_event(mcast->wait, !atomic_read(&mcast->refcount));
                ipath_mcast_free(mcast);
        }

        ret = 0;

bail:
        return ret;
}

int ipath_mcast_tree_empty(void)
{
        return mcast_tree.rb_node == NULL;
}
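
For context on how the entry points added in this file are reached: ipath_multicast_attach() and ipath_multicast_detach() are meant to be registered as the device's attach_mcast/detach_mcast verbs methods, which the core RDMA stack invokes through ib_attach_mcast() and ib_detach_mcast(). The sketch below is not part of this commit; it is a minimal, hypothetical consumer-side illustration, and the function name, QP, and group parameters (mgid, mlid) are assumptions chosen for the example.

/* Hypothetical caller sketch -- not part of this commit.
 * Assumes a UD QP "qp" already created on an ipath device and a
 * multicast group identified by mgid/mlid (e.g. from an SA join).
 */
#include <rdma/ib_verbs.h>

static int example_join_group(struct ib_qp *qp, union ib_gid *mgid, u16 mlid)
{
        int ret;

        /* Core verbs call; dispatches to ipath_multicast_attach().
         * Returns -ENOMEM on allocation failure or -EINVAL if this QP
         * is already attached to the group.
         */
        ret = ib_attach_mcast(qp, mgid, mlid);
        if (ret)
                return ret;

        /* ... receive multicast traffic on qp ... */

        /* Core verbs call; dispatches to ipath_multicast_detach(). */
        return ib_detach_mcast(qp, mgid, mlid);
}

On the datapath side, ipath_mcast_find() takes a reference on the group under mcast_lock, and the RCU-protected qp_list allows packet processing to walk the attached QPs without holding the lock; detach removes entries with list_del_rcu() and then sleeps on mcast->wait until those references drain before freeing anything.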