aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/ulp/ipoib
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
committerLinus Torvalds <torvalds@ppc970.osdl.org>2005-04-16 18:20:36 -0400
commit1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/infiniband/ulp/ipoib
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it. Let it rip!
Diffstat (limited to 'drivers/infiniband/ulp/ipoib')
-rw-r--r--drivers/infiniband/ulp/ipoib/Kconfig33
-rw-r--r--drivers/infiniband/ulp/ipoib/Makefile11
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h353
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c287
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c668
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c1103
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c991
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c260
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c177
9 files changed, 3883 insertions, 0 deletions
diff --git a/drivers/infiniband/ulp/ipoib/Kconfig b/drivers/infiniband/ulp/ipoib/Kconfig
new file mode 100644
index 000000000000..8d2e04cac68e
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Kconfig
@@ -0,0 +1,33 @@
1config INFINIBAND_IPOIB
2 tristate "IP-over-InfiniBand"
3 depends on INFINIBAND && NETDEVICES && INET
4 ---help---
5 Support for the IP-over-InfiniBand protocol (IPoIB). This
6 transports IP packets over InfiniBand so you can use your IB
7 device as a fancy NIC.
8
9 The IPoIB protocol is defined by the IETF ipoib working
10 group: <http://www.ietf.org/html.charters/ipoib-charter.html>.
11
12config INFINIBAND_IPOIB_DEBUG
13 bool "IP-over-InfiniBand debugging"
14 depends on INFINIBAND_IPOIB
15 ---help---
16 This option causes debugging code to be compiled into the
17 IPoIB driver. The output can be turned on via the
18 debug_level and mcast_debug_level module parameters (which
19 can also be set after the driver is loaded through sysfs).
20
21 This option also creates an "ipoib_debugfs," which can be
22 mounted to expose debugging information about IB multicast
23 groups used by the IPoIB driver.
24
25config INFINIBAND_IPOIB_DEBUG_DATA
26 bool "IP-over-InfiniBand data path debugging"
27 depends on INFINIBAND_IPOIB_DEBUG
28 ---help---
	  This option compiles debugging code into the data path
30 of the IPoIB driver. The output can be turned on via the
31 data_debug_level module parameter; however, even with output
32 turned off, this debugging code will have some performance
33 impact.
diff --git a/drivers/infiniband/ulp/ipoib/Makefile b/drivers/infiniband/ulp/ipoib/Makefile
new file mode 100644
index 000000000000..394bc08abc6f
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/Makefile
@@ -0,0 +1,11 @@
# Pick up the InfiniBand core headers (this tree keeps them under
# drivers/infiniband/include rather than include/).
EXTRA_CFLAGS += -Idrivers/infiniband/include

obj-$(CONFIG_INFINIBAND_IPOIB) += ib_ipoib.o

# Objects always built into ib_ipoib.ko
ib_ipoib-y					:= ipoib_main.o \
						   ipoib_ib.o \
						   ipoib_multicast.o \
						   ipoib_verbs.o \
						   ipoib_vlan.o
# debugfs support is only built when debugging is configured in
ib_ipoib-$(CONFIG_INFINIBAND_IPOIB_DEBUG)	+= ipoib_fs.o
11
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
new file mode 100644
index 000000000000..04c98f54e9c4
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -0,0 +1,353 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib.h 1358 2004-12-17 22:00:11Z roland $
33 */
34
35#ifndef _IPOIB_H
36#define _IPOIB_H
37
38#include <linux/list.h>
39#include <linux/skbuff.h>
40#include <linux/netdevice.h>
41#include <linux/workqueue.h>
42#include <linux/pci.h>
43#include <linux/config.h>
44#include <linux/kref.h>
45#include <linux/if_infiniband.h>
46
47#include <net/neighbour.h>
48
49#include <asm/atomic.h>
50#include <asm/semaphore.h>
51
52#include <ib_verbs.h>
53#include <ib_pack.h>
54#include <ib_sa.h>
55
56/* constants */
57
/* Ring sizes, protocol limits, and bit indices for the driver's flag words. */
enum {
	IPOIB_PACKET_SIZE         = 2048,	/* max packet, incl. 4-byte encap header */
	IPOIB_BUF_SIZE 		  = IPOIB_PACKET_SIZE + IB_GRH_BYTES, /* RX buffer: packet + GRH */

	IPOIB_ENCAP_LEN 	  = 4,		/* sizeof(struct ipoib_header) */

	IPOIB_RX_RING_SIZE 	  = 128,
	IPOIB_TX_RING_SIZE 	  = 64,		/* must stay a power of 2: tx_head is masked
						   with (IPOIB_TX_RING_SIZE - 1) in ipoib_send() */

	IPOIB_NUM_WC 		  = 4,		/* work completions polled per ib_poll_cq() call */

	IPOIB_MAX_PATH_REC_QUEUE  = 3,
	IPOIB_MAX_MCAST_QUEUE     = 3,

	/* bit numbers within ipoib_dev_priv.flags */
	IPOIB_FLAG_OPER_UP 	  = 0,
	IPOIB_FLAG_ADMIN_UP 	  = 1,
	IPOIB_PKEY_ASSIGNED 	  = 2,
	IPOIB_PKEY_STOP 	  = 3,
	IPOIB_FLAG_SUBINTERFACE   = 4,
	IPOIB_MCAST_RUN 	  = 5,
	IPOIB_STOP_REAPER         = 6,		/* tells ipoib_reap_ah() not to requeue itself */

	IPOIB_MAX_BACKOFF_SECONDS = 16,

	/* bit numbers within the multicast group flags word */
	IPOIB_MCAST_FLAG_FOUND 	  = 0,	/* used in set_multicast_list */
	IPOIB_MCAST_FLAG_SENDONLY = 1,
	IPOIB_MCAST_FLAG_BUSY 	  = 2,	/* joining or already joined */
	IPOIB_MCAST_FLAG_ATTACHED = 3,
};
87
/* structs */

/* 4-byte IPoIB encapsulation header at the front of every packet. */
struct ipoib_header {
	u16 proto;	/* protocol/ethertype of the encapsulated packet */
	u16 reserved;
};

/* Fake hardware-address header used where a link-layer address is expected. */
struct ipoib_pseudoheader {
	u8  hwaddr[INFINIBAND_ALEN];
};

struct ipoib_mcast;

/* One RX or TX ring slot: the skb plus its DMA mapping (kept for unmap). */
struct ipoib_buf {
	struct sk_buff *skb;
	DECLARE_PCI_UNMAP_ADDR(mapping)
};
105
/*
 * Device private locking: tx_lock protects members used in TX fast
 * path (and we use LLTX so upper layers don't do extra locking).
 * lock protects everything else.  lock nests inside of tx_lock (ie
 * tx_lock must be acquired first if needed).
 */
struct ipoib_dev_priv {
	spinlock_t lock;	/* protects everything not under tx_lock (see above) */

	struct net_device *dev;

	unsigned long flags;	/* IPOIB_FLAG_* / IPOIB_PKEY_* / IPOIB_MCAST_RUN bits */

	struct semaphore mcast_mutex;
	struct semaphore vlan_mutex;

	/* unicast path records: rb-tree for lookup, list for iteration */
	struct rb_root  path_tree;
	struct list_head path_list;

	struct ipoib_mcast *broadcast;
	struct list_head multicast_list;
	struct rb_root multicast_tree;

	/* deferred work run on ipoib_workqueue */
	struct work_struct pkey_task;
	struct work_struct mcast_task;
	struct work_struct flush_task;
	struct work_struct restart_task;
	struct work_struct ah_reap_task;	/* periodic dead-AH reaper */

	/* underlying IB device resources */
	struct ib_device *ca;
	u8            	  port;
	u16           	  pkey;
	struct ib_pd  	 *pd;
	struct ib_mr  	 *mr;
	struct ib_cq  	 *cq;
	struct ib_qp  	 *qp;
	u32           	  qkey;

	union ib_gid local_gid;
	u16          local_lid;		/* used to detect our own (loopback) receives */
	u8           local_rate;

	unsigned int admin_mtu;
	unsigned int mcast_mtu;

	struct ipoib_buf *rx_ring;	/* IPOIB_RX_RING_SIZE slots */

	spinlock_t        tx_lock;	/* TX fast path state below */
	struct ipoib_buf *tx_ring;	/* IPOIB_TX_RING_SIZE slots */
	unsigned          tx_head;	/* head - tail == sends in flight */
	unsigned          tx_tail;
	struct ib_sge     tx_sge;	/* scratch SGE/WR reused for every post_send() */
	struct ib_send_wr tx_wr;

	struct ib_wc ibwc[IPOIB_NUM_WC];	/* scratch array for CQ polling */

	struct list_head dead_ahs;	/* AHs waiting for their last send to complete */

	struct ib_event_handler event_handler;

	struct net_device_stats stats;

	/* P_Key (sub)interface bookkeeping */
	struct net_device *parent;
	struct list_head child_intfs;
	struct list_head list;

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
	struct list_head fs_list;	/* link on the debugfs device list (ipoib_fs.c) */
	struct dentry *mcg_dentry;	/* this device's "<ifname>_mcg" debug file */
#endif
};
177
/* Reference-counted wrapper around an IB address handle.  last_send
 * records the tx_head value of the most recent send posted through it,
 * so the AH is only destroyed once that send has completed. */
struct ipoib_ah {
	struct net_device *dev;
	struct ib_ah      *ah;
	struct list_head   list;	/* link on priv->dead_ahs once released */
	struct kref        ref;
	unsigned           last_send;
};

/* A resolved (or in-flight) unicast path to a remote GID. */
struct ipoib_path {
	struct net_device    *dev;
	struct ib_sa_path_rec pathrec;
	struct ipoib_ah      *ah;	/* NULL until the SA query completes */
	struct sk_buff_head   queue;	/* skbs waiting for path resolution */

	struct list_head      neigh_list;

	int                   query_id;
	struct ib_sa_query   *query;	/* outstanding SA path record query, if any */
	struct completion     done;

	struct rb_node        rb_node;	/* in priv->path_tree */
	struct list_head      list;	/* on priv->path_list */
};

/* Per-neighbour state hung off struct neighbour (see to_ipoib_neigh). */
struct ipoib_neigh {
	struct ipoib_ah    *ah;
	struct sk_buff_head queue;	/* skbs queued until the AH is ready */

	struct neighbour   *neighbour;

	struct list_head    list;	/* on the owning path's neigh_list */
};
210
/*
 * Return the location where the ipoib_neigh pointer is stashed inside
 * the neighbour's hardware-address storage (just past the 20-byte
 * IPoIB hardware address, adjusted so the pointer slot is aligned).
 * NOTE(review): this relies on the exact layout/alignment of
 * neigh->ha within struct neighbour -- confirm before changing.
 */
static inline struct ipoib_neigh **to_ipoib_neigh(struct neighbour *neigh)
{
	return (struct ipoib_neigh **) (neigh->ha + 24 -
					(offsetof(struct neighbour, ha) & 4));
}
216
/* Single driver-wide workqueue all deferred IPoIB work runs on. */
extern struct workqueue_struct *ipoib_workqueue;

/* functions */

/* CQ completion callback; polls and dispatches work completions (ipoib_ib.c). */
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr);

struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
				 struct ib_pd *pd, struct ib_ah_attr *attr);
void ipoib_free_ah(struct kref *kref);
/* Drop a reference on an AH; ipoib_free_ah runs on the final put. */
static inline void ipoib_put_ah(struct ipoib_ah *ah)
{
	kref_put(&ah->ref, ipoib_free_ah);
}

int ipoib_add_pkey_attr(struct net_device *dev);

void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn);
void ipoib_reap_ah(void *dev_ptr);

void ipoib_flush_paths(struct net_device *dev);
struct ipoib_dev_priv *ipoib_intf_alloc(const char *format);

/* device init / teardown / state transitions (ipoib_ib.c) */
int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_ib_dev_flush(void *dev);
void ipoib_ib_dev_cleanup(struct net_device *dev);

int ipoib_ib_dev_open(struct net_device *dev);
int ipoib_ib_dev_up(struct net_device *dev);
int ipoib_ib_dev_down(struct net_device *dev);
int ipoib_ib_dev_stop(struct net_device *dev);

int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port);
void ipoib_dev_cleanup(struct net_device *dev);

/* multicast join/send machinery (ipoib_multicast.c) */
void ipoib_mcast_join_task(void *dev_ptr);
void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
		      struct sk_buff *skb);

void ipoib_mcast_restart_task(void *dev_ptr);
int ipoib_mcast_start_thread(struct net_device *dev);
int ipoib_mcast_stop_thread(struct net_device *dev);

void ipoib_mcast_dev_down(struct net_device *dev);
void ipoib_mcast_dev_flush(struct net_device *dev);

/* multicast-group iterator used by the debugfs "<ifname>_mcg" file */
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *gid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only);

int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
		       union ib_gid *mgid);
int ipoib_mcast_detach(struct net_device *dev, u16 mlid,
		       union ib_gid *mgid);

int ipoib_qp_create(struct net_device *dev);
int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
void ipoib_transport_dev_cleanup(struct net_device *dev);

void ipoib_event(struct ib_event_handler *handler,
		 struct ib_event *record);

int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey);
int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey);

void ipoib_pkey_poll(void *dev);
int ipoib_pkey_dev_delay_open(struct net_device *dev);

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
int ipoib_create_debug_file(struct net_device *dev);
void ipoib_delete_debug_file(struct net_device *dev);
int ipoib_register_debugfs(void);
void ipoib_unregister_debugfs(void);
#else
/* no-op stubs so callers need no #ifdefs when debugging is compiled out */
static inline int ipoib_create_debug_file(struct net_device *dev) { return 0; }
static inline void ipoib_delete_debug_file(struct net_device *dev) { }
static inline int ipoib_register_debugfs(void) { return 0; }
static inline void ipoib_unregister_debugfs(void) { }
#endif
302
303
/* printk with the interface name prefixed; priv is an ipoib_dev_priv pointer */
#define ipoib_printk(level, priv, format, arg...) \
	printk(level "%s: " format, ((struct ipoib_dev_priv *) priv)->dev->name , ## arg)
#define ipoib_warn(priv, format, arg...) \
	ipoib_printk(KERN_WARNING, priv, format , ## arg)


#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
extern int ipoib_debug_level;

#define ipoib_dbg(priv, format, arg...) \
	do { \
		if (ipoib_debug_level > 0) \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
/* NOTE: mcast_debug_level is not declared in this header -- this macro
 * is only usable in a file that declares it itself. */
#define ipoib_dbg_mcast(priv, format, arg...) \
	do { \
		if (mcast_debug_level > 0) \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
#else /* CONFIG_INFINIBAND_IPOIB_DEBUG */
/* compiled-out variants still evaluate priv, so the debug and
 * non-debug builds produce the same unused-variable warnings */
#define ipoib_dbg(priv, format, arg...) \
	do { (void) (priv); } while (0)
#define ipoib_dbg_mcast(priv, format, arg...) \
	do { (void) (priv); } while (0)
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
#define ipoib_dbg_data(priv, format, arg...) \
	do { \
		if (data_debug_level > 0) \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
#else /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */
#define ipoib_dbg_data(priv, format, arg...) \
	do { (void) (priv); } while (0)
#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG_DATA */


/* Print a GID as eight colon-separated 16-bit hex groups. */
#define IPOIB_GID_FMT "%x:%x:%x:%x:%x:%x:%x:%x"

#define IPOIB_GID_ARG(gid) be16_to_cpup((__be16 *) ((gid).raw + 0)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 2)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 4)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 6)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 8)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 10)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 12)), \
			   be16_to_cpup((__be16 *) ((gid).raw + 14))
352
353#endif /* _IPOIB_H */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
new file mode 100644
index 000000000000..044f2c78ef15
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -0,0 +1,287 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_fs.c 1389 2004-12-27 22:56:47Z roland $
33 */
34
35#include <linux/pagemap.h>
36#include <linux/seq_file.h>
37
38#include "ipoib.h"
39
enum {
	IPOIB_MAGIC = 0x49504942 /* "IPIB" */
};

/* serializes ipoib_sb and ipoib_device_list against mount/unmount */
static DECLARE_MUTEX(ipoib_fs_mutex);
static struct dentry *ipoib_root;	/* root dentry of the mounted filesystem */
static struct super_block *ipoib_sb;	/* non-NULL only while ipoib_debugfs is mounted */
static LIST_HEAD(ipoib_device_list);	/* every device that wants a debug file */
48
49static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
50{
51 struct ipoib_mcast_iter *iter;
52 loff_t n = *pos;
53
54 iter = ipoib_mcast_iter_init(file->private);
55 if (!iter)
56 return NULL;
57
58 while (n--) {
59 if (ipoib_mcast_iter_next(iter)) {
60 ipoib_mcast_iter_free(iter);
61 return NULL;
62 }
63 }
64
65 return iter;
66}
67
68static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
69 loff_t *pos)
70{
71 struct ipoib_mcast_iter *iter = iter_ptr;
72
73 (*pos)++;
74
75 if (ipoib_mcast_iter_next(iter)) {
76 ipoib_mcast_iter_free(iter);
77 return NULL;
78 }
79
80 return iter;
81}
82
/* seq_file ->stop: the iterator is freed by start/next on exhaustion,
 * so there is nothing to clean up here. */
static void ipoib_mcg_seq_stop(struct seq_file *file, void *iter_ptr)
{
	/* nothing for now */
}
87
88static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
89{
90 struct ipoib_mcast_iter *iter = iter_ptr;
91 char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
92 union ib_gid mgid;
93 int i, n;
94 unsigned long created;
95 unsigned int queuelen, complete, send_only;
96
97 if (iter) {
98 ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
99 &complete, &send_only);
100
101 for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
102 n += sprintf(gid_buf + n, "%x",
103 be16_to_cpu(((u16 *)mgid.raw)[i]));
104 if (i < sizeof mgid / 2 - 1)
105 gid_buf[n++] = ':';
106 }
107 }
108
109 seq_printf(file, "GID: %*s", -(1 + (int) sizeof gid_buf), gid_buf);
110
111 seq_printf(file,
112 " created: %10ld queuelen: %4d complete: %d send_only: %d\n",
113 created, queuelen, complete, send_only);
114
115 return 0;
116}
117
/* seq_file operations backing the per-device multicast-group file */
static struct seq_operations ipoib_seq_ops = {
	.start = ipoib_mcg_seq_start,
	.next  = ipoib_mcg_seq_next,
	.stop  = ipoib_mcg_seq_stop,
	.show  = ipoib_mcg_seq_show,
};
124
/*
 * ->open for the "<ifname>_mcg" file: start a seq_file session and
 * hand it the net_device that __ipoib_create_debug_file() stashed in
 * the inode (inode->u.generic_ip), so the iterator knows which
 * device's multicast list to walk.
 */
static int ipoib_mcg_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &ipoib_seq_ops);
	if (ret)
		return ret;

	seq = file->private_data;
	seq->private = inode->u.generic_ip;

	return 0;
}
139
/* file operations for the debug files; read side is plain seq_file */
static struct file_operations ipoib_fops = {
	.owner   = THIS_MODULE,
	.open    = ipoib_mcg_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};
147
/*
 * Allocate a read-only regular-file inode on the ipoib_debugfs
 * superblock, wired to ipoib_fops.  Returns NULL on allocation
 * failure (or if ipoib_sb is NULL).
 */
static struct inode *ipoib_get_inode(void)
{
	struct inode *inode = new_inode(ipoib_sb);

	if (inode) {
		inode->i_mode 	 = S_IFREG | S_IRUGO;	/* world-readable, not writable */
		inode->i_uid 	 = 0;
		inode->i_gid 	 = 0;
		inode->i_blksize = PAGE_CACHE_SIZE;
		inode->i_blocks  = 0;
		inode->i_atime 	 = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_fop     = &ipoib_fops;
	}

	return inode;
}
164
/*
 * Create the "<ifname>_mcg" file under the filesystem root for one
 * device.  Caller must ensure the filesystem is mounted (ipoib_sb /
 * ipoib_root valid).  Returns 0 or -ENOMEM.
 */
static int __ipoib_create_debug_file(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dentry *dentry;
	struct inode *inode;
	char name[IFNAMSIZ + sizeof "_mcg"];

	snprintf(name, sizeof name, "%s_mcg", dev->name);

	dentry = d_alloc_name(ipoib_root, name);
	if (!dentry)
		return -ENOMEM;

	inode = ipoib_get_inode();
	if (!inode) {
		dput(dentry);
		return -ENOMEM;
	}

	/* stash the device so ipoib_mcg_open() can find it */
	inode->u.generic_ip = dev;
	priv->mcg_dentry = dentry;

	d_add(dentry, inode);

	return 0;
}
191
/*
 * Register a device for debugfs.  The device always goes on
 * ipoib_device_list; the actual file is created now only if the
 * filesystem is currently mounted, otherwise ipoib_fill_super()
 * creates it at mount time.
 * NOTE(review): the mutex is dropped before __ipoib_create_debug_file()
 * runs -- presumably an unmount racing here is tolerated; confirm.
 */
int ipoib_create_debug_file(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	down(&ipoib_fs_mutex);

	list_add_tail(&priv->fs_list, &ipoib_device_list);

	if (!ipoib_sb) {
		up(&ipoib_fs_mutex);
		return 0;
	}

	up(&ipoib_fs_mutex);

	return __ipoib_create_debug_file(dev);
}
209
/*
 * Unregister a device from debugfs: drop it from ipoib_device_list
 * and, if the filesystem is mounted, remove its "<ifname>_mcg" file.
 */
void ipoib_delete_debug_file(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	down(&ipoib_fs_mutex);
	list_del(&priv->fs_list);
	if (!ipoib_sb) {
		/* not mounted: there is no file to remove */
		up(&ipoib_fs_mutex);
		return;
	}
	up(&ipoib_fs_mutex);

	if (priv->mcg_dentry) {
		d_drop(priv->mcg_dentry);
		simple_unlink(ipoib_root->d_inode, priv->mcg_dentry);
	}
}
227
/*
 * Mount-time superblock setup: build an (initially empty) tree via
 * simple_fill_super(), record the root and superblock, then create a
 * debug file for every device already registered on
 * ipoib_device_list.  Returns 0 or the first error encountered.
 */
static int ipoib_fill_super(struct super_block *sb, void *data, int silent)
{
	static struct tree_descr ipoib_files[] = {
		{ "" }	/* no static files; entries are created per device */
	};
	struct ipoib_dev_priv *priv;
	int ret;

	ret = simple_fill_super(sb, IPOIB_MAGIC, ipoib_files);
	if (ret)
		return ret;

	ipoib_root = sb->s_root;

	down(&ipoib_fs_mutex);

	ipoib_sb = sb;

	list_for_each_entry(priv, &ipoib_device_list, fs_list) {
		ret = __ipoib_create_debug_file(priv->dev);
		if (ret)
			break;
	}

	up(&ipoib_fs_mutex);

	return ret;
}
256
/* Mount entry point: a single shared superblock for the whole system. */
static struct super_block *ipoib_get_sb(struct file_system_type *fs_type,
					int flags, const char *dev_name, void *data)
{
	return get_sb_single(fs_type, flags, data, ipoib_fill_super);
}
262
/* Unmount: clear ipoib_sb first (under the mutex) so concurrent
 * create/delete callers see the filesystem as gone, then tear down. */
static void ipoib_kill_sb(struct super_block *sb)
{
	down(&ipoib_fs_mutex);
	ipoib_sb = NULL;
	up(&ipoib_fs_mutex);

	kill_litter_super(sb);
}
271
/* the "ipoib_debugfs" filesystem type; mount it to expose mcast state */
static struct file_system_type ipoib_fs_type = {
	.owner   = THIS_MODULE,
	.name    = "ipoib_debugfs",
	.get_sb  = ipoib_get_sb,
	.kill_sb = ipoib_kill_sb,
};
278
/* Register the ipoib_debugfs filesystem type at module load. */
int ipoib_register_debugfs(void)
{
	return register_filesystem(&ipoib_fs_type);
}

/* Unregister the filesystem type at module unload. */
void ipoib_unregister_debugfs(void)
{
	unregister_filesystem(&ipoib_fs_type);
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
new file mode 100644
index 000000000000..c5a1d45e0ac5
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -0,0 +1,668 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_ib.c 1386 2004-12-27 16:23:17Z roland $
33 */
34
35#include <linux/delay.h>
36#include <linux/dma-mapping.h>
37
38#include <ib_cache.h>
39
40#include "ipoib.h"
41
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param(data_debug_level, int, 0644);
MODULE_PARM_DESC(data_debug_level,
		 "Enable data path debug tracing if > 0");
#endif

/* high bit of a wr_id marks it as a receive (vs. send) completion */
#define IPOIB_OP_RECV   (1ul << 31)

/* guards IPOIB_PKEY_STOP flag changes and pkey_task cancellation */
static DECLARE_MUTEX(pkey_sem);
53
54struct ipoib_ah *ipoib_create_ah(struct net_device *dev,
55 struct ib_pd *pd, struct ib_ah_attr *attr)
56{
57 struct ipoib_ah *ah;
58
59 ah = kmalloc(sizeof *ah, GFP_KERNEL);
60 if (!ah)
61 return NULL;
62
63 ah->dev = dev;
64 ah->last_send = 0;
65 kref_init(&ah->ref);
66
67 ah->ah = ib_create_ah(pd, attr);
68 if (IS_ERR(ah->ah)) {
69 kfree(ah);
70 ah = NULL;
71 } else
72 ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah);
73
74 return ah;
75}
76
/*
 * kref release for an address handle.  If every send posted through
 * this AH has already completed (last_send <= tx_tail) it can be
 * destroyed right away; otherwise park it on dead_ahs so the reaper
 * destroys it once its last send completes.
 * NOTE(review): tx_tail is read here without tx_lock -- presumably a
 * stale read only defers the free to the reaper; confirm.
 */
void ipoib_free_ah(struct kref *kref)
{
	struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref);
	struct ipoib_dev_priv *priv = netdev_priv(ah->dev);

	unsigned long flags;

	if (ah->last_send <= priv->tx_tail) {
		ipoib_dbg(priv, "Freeing ah %p\n", ah->ah);
		ib_destroy_ah(ah->ah);
		kfree(ah);
	} else {
		spin_lock_irqsave(&priv->lock, flags);
		list_add_tail(&ah->list, &priv->dead_ahs);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
}
94
/*
 * Post one receive work request for ring slot wr_id pointing at the
 * already-mapped buffer at addr.  The IPOIB_OP_RECV bit tags the
 * completion as a receive for ipoib_ib_handle_wc().
 */
static inline int ipoib_ib_receive(struct ipoib_dev_priv *priv,
				   unsigned int wr_id,
				   dma_addr_t addr)
{
	struct ib_sge list = {
		.addr     = addr,
		.length   = IPOIB_BUF_SIZE,
		.lkey     = priv->mr->lkey,
	};
	struct ib_recv_wr param = {
		.wr_id 	    = wr_id | IPOIB_OP_RECV,
		.sg_list    = &list,
		.num_sge    = 1,
	};
	struct ib_recv_wr *bad_wr;

	return ib_post_recv(priv->qp, &param, &bad_wr);
}
113
/*
 * Allocate, DMA-map and post a receive buffer into ring slot id.
 * On any failure the slot's skb is left NULL and a negative errno
 * (or the ib_post_recv() result) is returned.
 */
static int ipoib_ib_post_receive(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	dma_addr_t addr;
	int ret;

	skb = dev_alloc_skb(IPOIB_BUF_SIZE + 4);
	if (!skb) {
		ipoib_warn(priv, "failed to allocate receive buffer\n");

		priv->rx_ring[id].skb = NULL;
		return -ENOMEM;
	}
	skb_reserve(skb, 4);	/* 16 byte align IP header */
	priv->rx_ring[id].skb = skb;
	addr = dma_map_single(priv->ca->dma_device,
			      skb->data, IPOIB_BUF_SIZE,
			      DMA_FROM_DEVICE);
	/* remember the mapping so the completion handler can unmap it */
	pci_unmap_addr_set(&priv->rx_ring[id], mapping, addr);

	ret = ipoib_ib_receive(priv, id, addr);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_receive failed for buf %d (%d)\n",
			   id, ret);
		/* undo the map and allocation; slot stays empty */
		dma_unmap_single(priv->ca->dma_device, addr,
				 IPOIB_BUF_SIZE, DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
		priv->rx_ring[id].skb = NULL;
	}

	return ret;
}
147
148static int ipoib_ib_post_receives(struct net_device *dev)
149{
150 struct ipoib_dev_priv *priv = netdev_priv(dev);
151 int i;
152
153 for (i = 0; i < IPOIB_RX_RING_SIZE; ++i) {
154 if (ipoib_ib_post_receive(dev, i)) {
155 ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i);
156 return -EIO;
157 }
158 }
159
160 return 0;
161}
162
/*
 * Process one work completion.  The IPOIB_OP_RECV bit in wr_id
 * selects the receive path (hand the skb to the stack and repost the
 * ring slot) versus the send path (unmap, count, free the skb, and
 * advance tx_tail, waking the queue when the ring drains below half).
 */
static void ipoib_ib_handle_wc(struct net_device *dev,
			       struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned int wr_id = wc->wr_id;

	ipoib_dbg_data(priv, "called: id %d, op %d, status: %d\n",
		       wr_id, wc->opcode, wc->status);

	if (wr_id & IPOIB_OP_RECV) {
		wr_id &= ~IPOIB_OP_RECV;

		if (wr_id < IPOIB_RX_RING_SIZE) {
			struct sk_buff *skb = priv->rx_ring[wr_id].skb;

			/* take the skb out of the ring before touching it */
			priv->rx_ring[wr_id].skb = NULL;

			dma_unmap_single(priv->ca->dma_device,
					 pci_unmap_addr(&priv->rx_ring[wr_id],
							mapping),
					 IPOIB_BUF_SIZE,
					 DMA_FROM_DEVICE);

			if (wc->status != IB_WC_SUCCESS) {
				/* flush errors are expected during shutdown; stay quiet */
				if (wc->status != IB_WC_WR_FLUSH_ERR)
					ipoib_warn(priv, "failed recv event "
						   "(status=%d, wrid=%d vend_err %x)\n",
						   wc->status, wr_id, wc->vendor_err);
				dev_kfree_skb_any(skb);
				/* note: the failed slot is NOT reposted */
				return;
			}

			ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
				       wc->byte_len, wc->slid);

			/* strip the GRH before the IPoIB encap header */
			skb_put(skb, wc->byte_len);
			skb_pull(skb, IB_GRH_BYTES);

			/* drop our own multicast/loopback packets: same
			 * source LID and same QP number means we sent it */
			if (wc->slid != priv->local_lid ||
			    wc->src_qp != priv->qp->qp_num) {
				skb->protocol = ((struct ipoib_header *) skb->data)->proto;

				skb_pull(skb, IPOIB_ENCAP_LEN);

				dev->last_rx = jiffies;
				++priv->stats.rx_packets;
				priv->stats.rx_bytes += skb->len;

				skb->dev = dev;
				/* XXX get correct PACKET_ type here */
				skb->pkt_type = PACKET_HOST;
				netif_rx_ni(skb);
			} else {
				ipoib_dbg_data(priv, "dropping loopback packet\n");
				dev_kfree_skb_any(skb);
			}

			/* repost receive */
			if (ipoib_ib_post_receive(dev, wr_id))
				ipoib_warn(priv, "ipoib_ib_post_receive failed "
					   "for buf %d\n", wr_id);
		} else
			ipoib_warn(priv, "completion event with wrid %d\n",
				   wr_id);

	} else {
		/* send completion */
		struct ipoib_buf *tx_req;
		unsigned long flags;

		if (wr_id >= IPOIB_TX_RING_SIZE) {
			ipoib_warn(priv, "completion event with wrid %d (> %d)\n",
				   wr_id, IPOIB_TX_RING_SIZE);
			return;
		}

		ipoib_dbg_data(priv, "send complete, wrid %d\n", wr_id);

		tx_req = &priv->tx_ring[wr_id];

		dma_unmap_single(priv->ca->dma_device,
				 pci_unmap_addr(tx_req, mapping),
				 tx_req->skb->len,
				 DMA_TO_DEVICE);

		++priv->stats.tx_packets;
		priv->stats.tx_bytes += tx_req->skb->len;

		dev_kfree_skb_any(tx_req->skb);

		spin_lock_irqsave(&priv->tx_lock, flags);
		++priv->tx_tail;
		/* wake the stack once at least half the ring is free */
		if (netif_queue_stopped(dev) &&
		    priv->tx_head - priv->tx_tail <= IPOIB_TX_RING_SIZE / 2)
			netif_wake_queue(dev);
		spin_unlock_irqrestore(&priv->tx_lock, flags);

		if (wc->status != IB_WC_SUCCESS &&
		    wc->status != IB_WC_WR_FLUSH_ERR)
			ipoib_warn(priv, "failed send event "
				   "(status=%d, wrid=%d vend_err %x)\n",
				   wc->status, wr_id, wc->vendor_err);
	}
}
266
/*
 * CQ event handler: re-arm the CQ for the next completion, then drain
 * it in batches of IPOIB_NUM_WC.  Re-arming before polling ensures no
 * completion arriving during the poll loop is missed.
 */
void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
{
	struct net_device *dev = (struct net_device *) dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int n, i;

	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	do {
		n = ib_poll_cq(cq, IPOIB_NUM_WC, priv->ibwc);
		for (i = 0; i < n; ++i)
			ipoib_ib_handle_wc(dev, priv->ibwc + i);
	} while (n == IPOIB_NUM_WC);	/* a full batch may mean more are queued */
}
280
/*
 * Fill in the device's single scratch SGE/WR with this packet's
 * mapping and destination, and post it.  Safe only because the TX
 * path is serialized (LLTX with tx_lock held by the caller's context).
 */
static inline int post_send(struct ipoib_dev_priv *priv,
			    unsigned int wr_id,
			    struct ib_ah *address, u32 qpn,
			    dma_addr_t addr, int len)
{
	struct ib_send_wr *bad_wr;

	priv->tx_sge.addr             = addr;
	priv->tx_sge.length           = len;

	priv->tx_wr.wr_id 	      = wr_id;
	priv->tx_wr.wr.ud.remote_qpn  = qpn;
	priv->tx_wr.wr.ud.ah 	      = address;

	return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
}
297
/*
 * Transmit one skb to (address, qpn).  Oversized packets are counted
 * and dropped.  On post failure the skb is unmapped and freed (the
 * error is counted, not reported to the caller).  Stops the netdev
 * queue when the TX ring fills.
 */
void ipoib_send(struct net_device *dev, struct sk_buff *skb,
		struct ipoib_ah *address, u32 qpn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_buf *tx_req;
	dma_addr_t addr;

	if (skb->len > dev->mtu + INFINIBAND_ALEN) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, dev->mtu + INFINIBAND_ALEN);
		++priv->stats.tx_dropped;
		++priv->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n",
		       skb->len, address, qpn);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &priv->tx_ring[priv->tx_head & (IPOIB_TX_RING_SIZE - 1)];
	tx_req->skb = skb;
	addr = dma_map_single(priv->ca->dma_device, skb->data, skb->len,
			      DMA_TO_DEVICE);
	pci_unmap_addr_set(tx_req, mapping, addr);

	if (unlikely(post_send(priv, priv->tx_head & (IPOIB_TX_RING_SIZE - 1),
			       address->ah, qpn, addr, skb->len))) {
		ipoib_warn(priv, "post_send failed\n");
		++priv->stats.tx_errors;
		dma_unmap_single(priv->ca->dma_device, addr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;

		/* lets the AH be reaped only after this send completes */
		address->last_send = priv->tx_head;
		++priv->tx_head;

		if (priv->tx_head - priv->tx_tail == IPOIB_TX_RING_SIZE) {
			ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
			netif_stop_queue(dev);
		}
	}
}
349
350static void __ipoib_reap_ah(struct net_device *dev)
351{
352 struct ipoib_dev_priv *priv = netdev_priv(dev);
353 struct ipoib_ah *ah, *tah;
354 LIST_HEAD(remove_list);
355
356 spin_lock_irq(&priv->lock);
357 list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)
358 if (ah->last_send <= priv->tx_tail) {
359 list_del(&ah->list);
360 list_add_tail(&ah->list, &remove_list);
361 }
362 spin_unlock_irq(&priv->lock);
363
364 list_for_each_entry_safe(ah, tah, &remove_list, list) {
365 ipoib_dbg(priv, "Reaping ah %p\n", ah->ah);
366 ib_destroy_ah(ah->ah);
367 kfree(ah);
368 }
369}
370
371void ipoib_reap_ah(void *dev_ptr)
372{
373 struct net_device *dev = dev_ptr;
374 struct ipoib_dev_priv *priv = netdev_priv(dev);
375
376 __ipoib_reap_ah(dev);
377
378 if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
379 queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);
380}
381
/*
 * Bring up the IB side of the interface: create the QP, post the
 * initial receive buffers, and start the periodic AH reaper.
 *
 * Returns 0 on success, -1 on failure (callers only test non-zero).
 */
int ipoib_ib_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	ret = ipoib_qp_create(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_qp_create returned %d\n", ret);
		return -1;
	}

	ret = ipoib_ib_post_receives(dev);
	if (ret) {
		ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret);
		/* NOTE(review): the QP created above is not torn down on
		 * this error path -- confirm that the caller performs the
		 * cleanup (e.g. via ipoib_transport_dev_cleanup()). */
		return -1;
	}

	clear_bit(IPOIB_STOP_REAPER, &priv->flags);
	queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, HZ);

	return 0;
}
404
405int ipoib_ib_dev_up(struct net_device *dev)
406{
407 struct ipoib_dev_priv *priv = netdev_priv(dev);
408
409 set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
410
411 return ipoib_mcast_start_thread(dev);
412}
413
/*
 * Take the IB side of the interface down: clear the operational flag,
 * stop the P_Key poller (if it is still running) and the multicast
 * thread, then flush multicast groups and cached path records.
 * The ordering below is deliberate; see the inline comments.
 * Always returns 0.
 */
int ipoib_ib_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "downing ib_dev\n");

	clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
	netif_carrier_off(dev);

	/* Shutdown the P_Key thread if still active */
	if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
		/* pkey_sem serializes against ipoib_pkey_poll() rearming
		 * itself after we cancel the delayed work. */
		down(&pkey_sem);
		set_bit(IPOIB_PKEY_STOP, &priv->flags);
		cancel_delayed_work(&priv->pkey_task);
		up(&pkey_sem);
		flush_workqueue(ipoib_workqueue);
	}

	ipoib_mcast_stop_thread(dev);

	/*
	 * Flush the multicast groups first so we stop any multicast joins. The
	 * completion thread may have already died and we may deadlock waiting
	 * for the completion thread to finish some multicast joins.
	 */
	ipoib_mcast_dev_flush(dev);

	/* Delete broadcast and local addresses since they will be recreated */
	ipoib_mcast_dev_down(dev);

	ipoib_flush_paths(dev);

	return 0;
}
448
449static int recvs_pending(struct net_device *dev)
450{
451 struct ipoib_dev_priv *priv = netdev_priv(dev);
452 int pending = 0;
453 int i;
454
455 for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
456 if (priv->rx_ring[i].skb)
457 ++pending;
458
459 return pending;
460}
461
/*
 * Hard-stop the IB side of the interface: move the QP to the ERROR
 * state so outstanding work requests complete (or are flushed), wait
 * up to 5 seconds for the TX and RX rings to drain, and if the
 * hardware appears wedged free the pending buffers by hand.  Finally
 * reset the QP, stop the AH reaper, and reap (or, after a 1 second
 * timeout, leak) all dead address handles.  Always returns 0.
 */
int ipoib_ib_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int attr_mask;
	unsigned long begin;
	struct ipoib_buf *tx_req;
	int i;

	/* Kill the existing QP and allocate a new one */
	qp_attr.qp_state = IB_QPS_ERR;
	attr_mask = IB_QP_STATE;
	if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
		ipoib_warn(priv, "Failed to modify QP to ERROR state\n");

	/* Wait for all sends and receives to complete */
	begin = jiffies;

	while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "timing out; %d sends %d receives not completed\n",
				   priv->tx_head - priv->tx_tail, recvs_pending(dev));

			/*
			 * assume the HW is wedged and just free up
			 * all our pending work requests.
			 */
			while (priv->tx_tail < priv->tx_head) {
				tx_req = &priv->tx_ring[priv->tx_tail &
					 (IPOIB_TX_RING_SIZE - 1)];
				dma_unmap_single(priv->ca->dma_device,
						 pci_unmap_addr(tx_req, mapping),
						 tx_req->skb->len,
						 DMA_TO_DEVICE);
				dev_kfree_skb_any(tx_req->skb);
				++priv->tx_tail;
			}

			for (i = 0; i < IPOIB_RX_RING_SIZE; ++i)
				if (priv->rx_ring[i].skb) {
					dma_unmap_single(priv->ca->dma_device,
							 pci_unmap_addr(&priv->rx_ring[i],
									mapping),
							 IPOIB_BUF_SIZE,
							 DMA_FROM_DEVICE);
					dev_kfree_skb_any(priv->rx_ring[i].skb);
					priv->rx_ring[i].skb = NULL;
				}

			goto timeout;
		}

		msleep(1);
	}

	ipoib_dbg(priv, "All sends and receives done.\n");

timeout:
	qp_attr.qp_state = IB_QPS_RESET;
	attr_mask = IB_QP_STATE;
	if (ib_modify_qp(priv->qp, &qp_attr, attr_mask))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	/* Wait for all AHs to be reaped */
	set_bit(IPOIB_STOP_REAPER, &priv->flags);
	cancel_delayed_work(&priv->ah_reap_task);
	flush_workqueue(ipoib_workqueue);

	begin = jiffies;

	while (!list_empty(&priv->dead_ahs)) {
		__ipoib_reap_ah(dev);

		if (time_after(jiffies, begin + HZ)) {
			ipoib_warn(priv, "timing out; will leak address handles\n");
			break;
		}

		msleep(1);
	}

	return 0;
}
545
546int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
547{
548 struct ipoib_dev_priv *priv = netdev_priv(dev);
549
550 priv->ca = ca;
551 priv->port = port;
552 priv->qp = NULL;
553
554 if (ipoib_transport_dev_init(dev, ca)) {
555 printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name);
556 return -ENODEV;
557 }
558
559 if (dev->flags & IFF_UP) {
560 if (ipoib_ib_dev_open(dev)) {
561 ipoib_transport_dev_cleanup(dev);
562 return -ENODEV;
563 }
564 }
565
566 return 0;
567}
568
569void ipoib_ib_dev_flush(void *_dev)
570{
571 struct net_device *dev = (struct net_device *)_dev;
572 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv;
573
574 if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
575 return;
576
577 ipoib_dbg(priv, "flushing\n");
578
579 ipoib_ib_dev_down(dev);
580
581 /*
582 * The device could have been brought down between the start and when
583 * we get here, don't bring it back up if it's not configured up
584 */
585 if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
586 ipoib_ib_dev_up(dev);
587
588 /* Flush any child interfaces too */
589 list_for_each_entry(cpriv, &priv->child_intfs, list)
590 ipoib_ib_dev_flush(&cpriv->dev);
591}
592
/*
 * Final IB teardown for an interface: stop the multicast thread,
 * remove broadcast/local multicast state, and release the transport
 * resources set up by ipoib_transport_dev_init().
 */
void ipoib_ib_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "cleaning up ib_dev\n");

	ipoib_mcast_stop_thread(dev);

	/* Delete the broadcast address and the local address */
	ipoib_mcast_dev_down(dev);

	ipoib_transport_dev_cleanup(dev);
}
606
/*
 * Delayed P_Key Assignment Interim Support
 *
 * The following is an initial implementation of the delayed P_Key
 * assignment mechanism.  It uses the same approach implemented for the
 * multicast group join.  The single goal of this implementation is to
 * quickly address Bug #2507.  This implementation will probably be
 * removed when the P_Key change async notification is available.
 */
616int ipoib_open(struct net_device *dev);
617
618static void ipoib_pkey_dev_check_presence(struct net_device *dev)
619{
620 struct ipoib_dev_priv *priv = netdev_priv(dev);
621 u16 pkey_index = 0;
622
623 if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index))
624 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
625 else
626 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
627}
628
629void ipoib_pkey_poll(void *dev_ptr)
630{
631 struct net_device *dev = dev_ptr;
632 struct ipoib_dev_priv *priv = netdev_priv(dev);
633
634 ipoib_pkey_dev_check_presence(dev);
635
636 if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags))
637 ipoib_open(dev);
638 else {
639 down(&pkey_sem);
640 if (!test_bit(IPOIB_PKEY_STOP, &priv->flags))
641 queue_delayed_work(ipoib_workqueue,
642 &priv->pkey_task,
643 HZ);
644 up(&pkey_sem);
645 }
646}
647
648int ipoib_pkey_dev_delay_open(struct net_device *dev)
649{
650 struct ipoib_dev_priv *priv = netdev_priv(dev);
651
652 /* Look for the interface pkey value in the IB Port P_Key table and */
653 /* set the interface pkey assigment flag */
654 ipoib_pkey_dev_check_presence(dev);
655
656 /* P_Key value not assigned yet - start polling */
657 if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) {
658 down(&pkey_sem);
659 clear_bit(IPOIB_PKEY_STOP, &priv->flags);
660 queue_delayed_work(ipoib_workqueue,
661 &priv->pkey_task,
662 HZ);
663 up(&pkey_sem);
664 return 1;
665 }
666
667 return 0;
668}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
new file mode 100644
index 000000000000..5a3b5c6a4494
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -0,0 +1,1103 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_main.c 1377 2004-12-23 19:57:12Z roland $
33 */
34
35#include "ipoib.h"
36
37#include <linux/version.h>
38#include <linux/module.h>
39
40#include <linux/init.h>
41#include <linux/slab.h>
42#include <linux/vmalloc.h>
43
44#include <linux/if_arp.h> /* For ARPHRD_xxx */
45
46#include <linux/ip.h>
47#include <linux/in.h>
48
49MODULE_AUTHOR("Roland Dreier");
50MODULE_DESCRIPTION("IP-over-InfiniBand net driver");
51MODULE_LICENSE("Dual BSD/GPL");
52
53#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
54int ipoib_debug_level;
55
56module_param_named(debug_level, ipoib_debug_level, int, 0644);
57MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
58#endif
59
/* IPoIB hardware address of the IPv4 broadcast group -- copied into
 * dev->broadcast in ipoib_setup(); bytes 8/9 are patched with the
 * port P_Key in ipoib_add_port().  Presumably flags/QPN followed by
 * the all-hosts multicast GID per the IPoIB spec -- TODO confirm. */
static const u8 ipv4_bcast_addr[] = {
	0x00, 0xff, 0xff, 0xff,
	0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff
};

/* Workqueue shared by all IPoIB deferred work (pkey poll, mcast join,
 * flush, restart, AH reaping). */
struct workqueue_struct *ipoib_workqueue;

static void ipoib_add_one(struct ib_device *device);
static void ipoib_remove_one(struct ib_device *device);

/* Registration with the IB core: called back for each HCA add/remove. */
static struct ib_client ipoib_client = {
	.name = "ipoib",
	.add = ipoib_add_one,
	.remove = ipoib_remove_one
};
76
/*
 * net_device open hook.  Marks the interface administratively up; if
 * the P_Key is not yet present in the port table this returns 0 and
 * defers the real bring-up to the P_Key poller (ipoib_pkey_poll),
 * which calls ipoib_open() again later.  Otherwise brings up the IB
 * side, then any child interfaces, and starts the TX queue.
 */
int ipoib_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "bringing up interface\n");

	set_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	if (ipoib_pkey_dev_delay_open(dev))
		return 0;	/* poller armed; open completes asynchronously */

	if (ipoib_ib_dev_open(dev))
		return -EINVAL;

	if (ipoib_ib_dev_up(dev))
		return -EINVAL;

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring up any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (flags & IFF_UP)
				continue;

			dev_change_flags(cpriv->dev, flags | IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	netif_start_queue(dev);

	return 0;
}
115
/*
 * net_device stop hook: clear the administrative-up flag, stop the TX
 * queue, shut down the IB side, and bring down any child interfaces.
 * Mirror image of ipoib_open().  Always returns 0.
 */
static int ipoib_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "stopping interface\n");

	clear_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags);

	netif_stop_queue(dev);

	ipoib_ib_dev_down(dev);
	ipoib_ib_dev_stop(dev);

	if (!test_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags)) {
		struct ipoib_dev_priv *cpriv;

		/* Bring down any child interfaces too */
		down(&priv->vlan_mutex);
		list_for_each_entry(cpriv, &priv->child_intfs, list) {
			int flags;

			flags = cpriv->dev->flags;
			if (!(flags & IFF_UP))
				continue;

			dev_change_flags(cpriv->dev, flags & ~IFF_UP);
		}
		up(&priv->vlan_mutex);
	}

	return 0;
}
148
149static int ipoib_change_mtu(struct net_device *dev, int new_mtu)
150{
151 struct ipoib_dev_priv *priv = netdev_priv(dev);
152
153 if (new_mtu > IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN)
154 return -EINVAL;
155
156 priv->admin_mtu = new_mtu;
157
158 dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);
159
160 return 0;
161}
162
163static struct ipoib_path *__path_find(struct net_device *dev,
164 union ib_gid *gid)
165{
166 struct ipoib_dev_priv *priv = netdev_priv(dev);
167 struct rb_node *n = priv->path_tree.rb_node;
168 struct ipoib_path *path;
169 int ret;
170
171 while (n) {
172 path = rb_entry(n, struct ipoib_path, rb_node);
173
174 ret = memcmp(gid->raw, path->pathrec.dgid.raw,
175 sizeof (union ib_gid));
176
177 if (ret < 0)
178 n = n->rb_left;
179 else if (ret > 0)
180 n = n->rb_right;
181 else
182 return path;
183 }
184
185 return NULL;
186}
187
188static int __path_add(struct net_device *dev, struct ipoib_path *path)
189{
190 struct ipoib_dev_priv *priv = netdev_priv(dev);
191 struct rb_node **n = &priv->path_tree.rb_node;
192 struct rb_node *pn = NULL;
193 struct ipoib_path *tpath;
194 int ret;
195
196 while (*n) {
197 pn = *n;
198 tpath = rb_entry(pn, struct ipoib_path, rb_node);
199
200 ret = memcmp(path->pathrec.dgid.raw, tpath->pathrec.dgid.raw,
201 sizeof (union ib_gid));
202 if (ret < 0)
203 n = &pn->rb_left;
204 else if (ret > 0)
205 n = &pn->rb_right;
206 else
207 return -EEXIST;
208 }
209
210 rb_link_node(&path->rb_node, pn, n);
211 rb_insert_color(&path->rb_node, &priv->path_tree);
212
213 list_add_tail(&path->list, &priv->path_list);
214
215 return 0;
216}
217
/*
 * Release a path record: drop any queued skbs, detach and free every
 * neighbour still attached to it, drop the path's own AH reference,
 * and free the path itself.  Takes priv->lock internally, so callers
 * must not hold it.
 */
static void path_free(struct net_device *dev, struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tn;
	struct sk_buff *skb;
	unsigned long flags;

	while ((skb = __skb_dequeue(&path->queue)))
		dev_kfree_skb_irq(skb);

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry_safe(neigh, tn, &path->neigh_list, list) {
		/*
		 * It's safe to call ipoib_put_ah() inside priv->lock
		 * here, because we know that path->ah will always
		 * hold one more reference, so ipoib_put_ah() will
		 * never do more than decrement the ref count.
		 */
		if (neigh->ah)
			ipoib_put_ah(neigh->ah);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Last reference may be dropped here; done outside the lock. */
	if (path->ah)
		ipoib_put_ah(path->ah);

	kfree(path);
}
251
/*
 * Drop every cached path record: detach the whole list and empty the
 * rbtree under priv->lock, then -- outside the lock -- cancel any
 * in-flight SA queries, wait for their completions, and free each
 * path.
 */
void ipoib_flush_paths(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path, *tp;
	LIST_HEAD(remove_list);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_splice(&priv->path_list, &remove_list);
	INIT_LIST_HEAD(&priv->path_list);

	list_for_each_entry(path, &remove_list, list)
		rb_erase(&path->rb_node, &priv->path_tree);

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(path, tp, &remove_list, list) {
		if (path->query)
			ib_sa_cancel_query(path->query_id, path->query);
		/* path_rec_completion() signals path->done even when the
		 * query is cancelled or failed. */
		wait_for_completion(&path->done);
		path_free(dev, path);
	}
}
276
/*
 * SA path record query completion callback.  On success, build an
 * address handle from the returned path record, attach it to the path
 * and to every neighbour waiting on the path, then retransmit every
 * skb that was queued while the lookup was in flight.  On failure,
 * just clear path->query.  Always completes path->done so waiters in
 * ipoib_flush_paths() make progress.
 */
static void path_rec_completion(int status,
				struct ib_sa_path_rec *pathrec,
				void *path_ptr)
{
	struct ipoib_path *path = path_ptr;
	struct net_device *dev = path->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_ah *ah = NULL;
	struct ipoib_neigh *neigh;
	struct sk_buff_head skqueue;
	struct sk_buff *skb;
	unsigned long flags;

	if (pathrec)
		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
	else
		ipoib_dbg(priv, "PathRec status %d for GID " IPOIB_GID_FMT "\n",
			  status, IPOIB_GID_ARG(path->pathrec.dgid));

	skb_queue_head_init(&skqueue);

	if (!status) {
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(pathrec->dlid),
			.sl = pathrec->sl,
			.port_num = priv->port
		};

		if (ib_sa_rate_enum_to_int(pathrec->rate) > 0)
			av.static_rate = (2 * priv->local_rate -
					  ib_sa_rate_enum_to_int(pathrec->rate) - 1) /
				(priv->local_rate ? priv->local_rate : 1);

		ipoib_dbg(priv, "static_rate %d for local port %dX, path %dX\n",
			  av.static_rate, priv->local_rate,
			  ib_sa_rate_enum_to_int(pathrec->rate));

		ah = ipoib_create_ah(dev, priv->pd, &av);
	}

	spin_lock_irqsave(&priv->lock, flags);

	path->ah = ah;

	if (ah) {
		path->pathrec = *pathrec;

		ipoib_dbg(priv, "created address handle %p for LID 0x%04x, SL %d\n",
			  ah, be16_to_cpu(pathrec->dlid), pathrec->sl);

		/* Collect everything queued on the path and its
		 * neighbours; actual transmission happens after we
		 * drop the lock. */
		while ((skb = __skb_dequeue(&path->queue)))
			__skb_queue_tail(&skqueue, skb);

		list_for_each_entry(neigh, &path->neigh_list, list) {
			kref_get(&path->ah->ref);
			neigh->ah = path->ah;

			while ((skb = __skb_dequeue(&neigh->queue)))
				__skb_queue_tail(&skqueue, skb);
		}
	} else
		path->query = NULL;

	complete(&path->done);

	spin_unlock_irqrestore(&priv->lock, flags);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}
}
352
353static struct ipoib_path *path_rec_create(struct net_device *dev,
354 union ib_gid *gid)
355{
356 struct ipoib_dev_priv *priv = netdev_priv(dev);
357 struct ipoib_path *path;
358
359 path = kmalloc(sizeof *path, GFP_ATOMIC);
360 if (!path)
361 return NULL;
362
363 path->dev = dev;
364 path->pathrec.dlid = 0;
365 path->ah = NULL;
366
367 skb_queue_head_init(&path->queue);
368
369 INIT_LIST_HEAD(&path->neigh_list);
370 path->query = NULL;
371 init_completion(&path->done);
372
373 memcpy(path->pathrec.dgid.raw, gid->raw, sizeof (union ib_gid));
374 path->pathrec.sgid = priv->local_gid;
375 path->pathrec.pkey = cpu_to_be16(priv->pkey);
376 path->pathrec.numb_path = 1;
377
378 return path;
379}
380
/*
 * Kick off an asynchronous SA path record query for @path; the result
 * is delivered to path_rec_completion().  On failure, clears
 * path->query and returns the negative ib_sa_path_rec_get() result;
 * returns 0 on success.
 */
static int path_rec_start(struct net_device *dev,
			  struct ipoib_path *path)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg(priv, "Start path record lookup for " IPOIB_GID_FMT "\n",
		  IPOIB_GID_ARG(path->pathrec.dgid));

	path->query_id =
		ib_sa_path_rec_get(priv->ca, priv->port,
				   &path->pathrec,
				   IB_SA_PATH_REC_DGID |
				   IB_SA_PATH_REC_SGID |
				   IB_SA_PATH_REC_NUMB_PATH |
				   IB_SA_PATH_REC_PKEY,
				   1000, GFP_ATOMIC,
				   path_rec_completion,
				   path, &path->query);
	if (path->query_id < 0) {
		ipoib_warn(priv, "ib_sa_path_rec_get failed\n");
		path->query = NULL;
		return path->query_id;
	}

	return 0;
}
407
/*
 * First transmit to a neighbour: allocate an ipoib_neigh, attach it
 * to the core neighbour entry and to the (possibly newly created)
 * path record for the destination GID, then either send immediately
 * (path already resolved) or queue the skb and start a path record
 * query.  Called only from ipoib_start_xmit() with priv->tx_lock
 * held, so plain spin_lock on priv->lock is sufficient.
 */
static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;
	struct ipoib_neigh *neigh;

	neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);
	if (!neigh) {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
		return;
	}

	skb_queue_head_init(&neigh->queue);
	neigh->neighbour = skb->dst->neighbour;
	*to_ipoib_neigh(skb->dst->neighbour) = neigh;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	/* hwaddr + 4 skips the 4-byte flags/QPN prefix to the GID. */
	path = __path_find(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (skb->dst->neighbour->ha + 4));
		if (!path)
			goto err;

		__path_add(dev, path);
	}

	list_add_tail(&neigh->list, &path->neigh_list);

	if (path->pathrec.dlid) {
		kref_get(&path->ah->ref);
		neigh->ah = path->ah;

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
	} else {
		neigh->ah = NULL;
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			__skb_queue_tail(&neigh->queue, skb);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		/* NOTE(review): if path_rec_start() fails here, the err
		 * path below frees skb again even though it may already
		 * have been queued on neigh->queue (or freed above) --
		 * looks like a potential double free; verify. */
		if (!path->query && path_rec_start(dev, path))
			goto err;
	}

	spin_unlock(&priv->lock);
	return;

err:
	*to_ipoib_neigh(skb->dst->neighbour) = NULL;
	list_del(&neigh->list);
	neigh->neighbour->ops->destructor = NULL;
	kfree(neigh);

	++priv->stats.tx_dropped;
	dev_kfree_skb_any(skb);

	spin_unlock(&priv->lock);
}
476
477static void path_lookup(struct sk_buff *skb, struct net_device *dev)
478{
479 struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
480
481 /* Look up path record for unicasts */
482 if (skb->dst->neighbour->ha[4] != 0xff) {
483 neigh_add_path(skb, dev);
484 return;
485 }
486
487 /* Add in the P_Key for multicasts */
488 skb->dst->neighbour->ha[8] = (priv->pkey >> 8) & 0xff;
489 skb->dst->neighbour->ha[9] = priv->pkey & 0xff;
490 ipoib_mcast_send(dev, (union ib_gid *) (skb->dst->neighbour->ha + 4), skb);
491}
492
/*
 * Transmit a unicast packet that carries a pseudoheader instead of a
 * neighbour entry (e.g. an ARP reply).  Look up or create the path
 * record for the destination GID; send immediately when resolved,
 * otherwise queue the skb (with the pseudoheader pushed back on)
 * behind the path query.  Called from ipoib_start_xmit() with
 * priv->tx_lock held.
 */
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
			     struct ipoib_pseudoheader *phdr)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_path *path;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	/* hwaddr + 4 skips the 4-byte flags/QPN prefix to the GID. */
	path = __path_find(dev, (union ib_gid *) (phdr->hwaddr + 4));
	if (!path) {
		path = path_rec_create(dev,
				       (union ib_gid *) (phdr->hwaddr + 4));
		if (path) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof *phdr);
			__skb_queue_tail(&path->queue, skb);

			if (path_rec_start(dev, path)) {
				/* path_free() takes priv->lock itself. */
				spin_unlock(&priv->lock);
				path_free(dev, path);
				return;
			} else
				__path_add(dev, path);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}

		spin_unlock(&priv->lock);
		return;
	}

	if (path->pathrec.dlid) {
		ipoib_dbg(priv, "Send unicast ARP to %04x\n",
			  be16_to_cpu(path->pathrec.dlid));

		ipoib_send(dev, skb, path->ah,
			   be32_to_cpup((__be32 *) phdr->hwaddr));
	} else if ((path->query || !path_rec_start(dev, path)) &&
		   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
		/* put pseudoheader back on for next time */
		skb_push(skb, sizeof *phdr);
		__skb_queue_tail(&path->queue, skb);
	} else {
		++priv->stats.tx_dropped;
		dev_kfree_skb_any(skb);
	}

	spin_unlock(&priv->lock);
}
547
/*
 * net_device hard_start_xmit hook.  The device sets NETIF_F_LLTX, so
 * we do our own locking with priv->tx_lock (trylock; contention
 * returns NETDEV_TX_LOCKED).  Three dispatch cases:
 *  - neighbour present with a resolved AH: send directly;
 *  - neighbour present but unresolved: start/queue behind the path
 *    lookup;
 *  - no neighbour: consume the pseudoheader pushed by
 *    ipoib_hard_header() and dispatch to multicast or unicast-ARP
 *    handling.
 */
static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh;
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&priv->tx_lock)) {
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	/*
	 * Check if our queue is stopped. Since we have the LLTX bit
	 * set, we can't rely on netif_stop_queue() preventing our
	 * xmit function from being called with a full queue.
	 */
	if (unlikely(netif_queue_stopped(dev))) {
		spin_unlock_irqrestore(&priv->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	if (skb->dst && skb->dst->neighbour) {
		if (unlikely(!*to_ipoib_neigh(skb->dst->neighbour))) {
			/* First packet to this neighbour. */
			path_lookup(skb, dev);
			goto out;
		}

		neigh = *to_ipoib_neigh(skb->dst->neighbour);

		if (likely(neigh->ah)) {
			ipoib_send(dev, skb, neigh->ah,
				   be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
			goto out;
		}

		/* Path lookup still in flight: queue (bounded) or drop. */
		if (skb_queue_len(&neigh->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
			spin_lock(&priv->lock);
			__skb_queue_tail(&neigh->queue, skb);
			spin_unlock(&priv->lock);
		} else {
			++priv->stats.tx_dropped;
			dev_kfree_skb_any(skb);
		}
	} else {
		struct ipoib_pseudoheader *phdr =
			(struct ipoib_pseudoheader *) skb->data;
		skb_pull(skb, sizeof *phdr);

		if (phdr->hwaddr[4] == 0xff) {
			/* Add in the P_Key for multicast*/
			phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
			phdr->hwaddr[9] = priv->pkey & 0xff;

			ipoib_mcast_send(dev, (union ib_gid *) (phdr->hwaddr + 4), skb);
		} else {
			/* unicast GID -- should be ARP reply */

			if (be16_to_cpup((u16 *) skb->data) != ETH_P_ARP) {
				ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x "
					   IPOIB_GID_FMT "\n",
					   skb->dst ? "neigh" : "dst",
					   be16_to_cpup((u16 *) skb->data),
					   be32_to_cpup((u32 *) phdr->hwaddr),
					   IPOIB_GID_ARG(*(union ib_gid *) (phdr->hwaddr + 4)));
				dev_kfree_skb_any(skb);
				++priv->stats.tx_dropped;
				goto out;
			}

			unicast_arp_send(skb, dev, phdr);
		}
	}

out:
	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}
627
628static struct net_device_stats *ipoib_get_stats(struct net_device *dev)
629{
630 struct ipoib_dev_priv *priv = netdev_priv(dev);
631
632 return &priv->stats;
633}
634
635static void ipoib_timeout(struct net_device *dev)
636{
637 struct ipoib_dev_priv *priv = netdev_priv(dev);
638
639 ipoib_warn(priv, "transmit timeout: latency %ld\n",
640 jiffies - dev->trans_start);
641 /* XXX reset QP, etc. */
642}
643
644static int ipoib_hard_header(struct sk_buff *skb,
645 struct net_device *dev,
646 unsigned short type,
647 void *daddr, void *saddr, unsigned len)
648{
649 struct ipoib_header *header;
650
651 header = (struct ipoib_header *) skb_push(skb, sizeof *header);
652
653 header->proto = htons(type);
654 header->reserved = 0;
655
656 /*
657 * If we don't have a neighbour structure, stuff the
658 * destination address onto the front of the skb so we can
659 * figure out where to send the packet later.
660 */
661 if (!skb->dst || !skb->dst->neighbour) {
662 struct ipoib_pseudoheader *phdr =
663 (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
664 memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
665 }
666
667 return 0;
668}
669
670static void ipoib_set_mcast_list(struct net_device *dev)
671{
672 struct ipoib_dev_priv *priv = netdev_priv(dev);
673
674 schedule_work(&priv->restart_task);
675}
676
/*
 * Destructor installed on core neighbour entries by
 * ipoib_neigh_setup(): detach and free our ipoib_neigh, and drop its
 * AH reference (after releasing priv->lock, since this may be the
 * last reference).
 */
static void ipoib_neigh_destructor(struct neighbour *n)
{
	struct ipoib_neigh *neigh;
	struct ipoib_dev_priv *priv = netdev_priv(n->dev);
	unsigned long flags;
	struct ipoib_ah *ah = NULL;

	ipoib_dbg(priv,
		  "neigh_destructor for %06x " IPOIB_GID_FMT "\n",
		  be32_to_cpup((__be32 *) n->ha),
		  IPOIB_GID_ARG(*((union ib_gid *) (n->ha + 4))));

	spin_lock_irqsave(&priv->lock, flags);

	neigh = *to_ipoib_neigh(n);
	if (neigh) {
		if (neigh->ah)
			ah = neigh->ah;
		list_del(&neigh->list);
		*to_ipoib_neigh(n) = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ah)
		ipoib_put_ah(ah);
}
705
/*
 * Per-neighbour setup hook: install our destructor so the attached
 * ipoib_neigh is torn down when the core neighbour entry dies.
 */
static int ipoib_neigh_setup(struct neighbour *neigh)
{
	/*
	 * Is this kosher? I can't find anybody in the kernel that
	 * sets neigh->destructor, so we should be able to set it here
	 * without trouble.
	 */
	neigh->ops->destructor = ipoib_neigh_destructor;

	return 0;
}
717
/* neigh_parms hook: register our per-neighbour setup function. */
static int ipoib_neigh_setup_dev(struct net_device *dev, struct neigh_parms *parms)
{
	parms->neigh_setup = ipoib_neigh_setup;

	return 0;
}
724
/*
 * Per-device allocation: create zeroed RX and TX rings and run the
 * IB-layer init.  Returns 0 on success or -ENOMEM on failure; on the
 * error paths everything allocated here is freed again.
 */
int ipoib_dev_init(struct net_device *dev, struct ib_device *ca, int port)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* Allocate RX/TX "rings" to hold queued skbs */

	priv->rx_ring = kmalloc(IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate RX ring (%d entries)\n",
		       ca->name, IPOIB_RX_RING_SIZE);
		goto out;
	}
	memset(priv->rx_ring, 0,
	       IPOIB_RX_RING_SIZE * sizeof (struct ipoib_buf));

	priv->tx_ring = kmalloc(IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf),
				GFP_KERNEL);
	if (!priv->tx_ring) {
		printk(KERN_WARNING "%s: failed to allocate TX ring (%d entries)\n",
		       ca->name, IPOIB_TX_RING_SIZE);
		goto out_rx_ring_cleanup;
	}
	memset(priv->tx_ring, 0,
	       IPOIB_TX_RING_SIZE * sizeof (struct ipoib_buf));

	/* priv->tx_head & tx_tail are already 0 */

	if (ipoib_ib_dev_init(dev, ca, port))
		goto out_tx_ring_cleanup;

	return 0;

out_tx_ring_cleanup:
	kfree(priv->tx_ring);

out_rx_ring_cleanup:
	kfree(priv->rx_ring);

out:
	return -ENOMEM;
}
767
768void ipoib_dev_cleanup(struct net_device *dev)
769{
770 struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
771
772 ipoib_delete_debug_file(dev);
773
774 /* Delete any child interfaces first */
775 list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
776 unregister_netdev(cpriv->dev);
777 ipoib_dev_cleanup(cpriv->dev);
778 free_netdev(cpriv->dev);
779 }
780
781 ipoib_ib_dev_cleanup(dev);
782
783 if (priv->rx_ring) {
784 kfree(priv->rx_ring);
785 priv->rx_ring = NULL;
786 }
787
788 if (priv->tx_ring) {
789 kfree(priv->tx_ring);
790 priv->tx_ring = NULL;
791 }
792}
793
/*
 * alloc_netdev() setup callback: install the net_device operations,
 * configure IPoIB link-layer parameters (address length, type, MTU,
 * broadcast address), and initialize the private state (locks, lists,
 * work items).
 */
static void ipoib_setup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	dev->open = ipoib_open;
	dev->stop = ipoib_stop;
	dev->change_mtu = ipoib_change_mtu;
	dev->hard_start_xmit = ipoib_start_xmit;
	dev->get_stats = ipoib_get_stats;
	dev->tx_timeout = ipoib_timeout;
	dev->hard_header = ipoib_hard_header;
	dev->set_multicast_list = ipoib_set_mcast_list;
	dev->neigh_setup = ipoib_neigh_setup_dev;

	dev->watchdog_timeo = HZ;

	/* Hooks IPoIB deliberately does not implement. */
	dev->rebuild_header = NULL;
	dev->set_mac_address = NULL;
	dev->header_cache_update = NULL;

	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;

	/*
	 * We add in INFINIBAND_ALEN to allow for the destination
	 * address "pseudoheader" for skbs without neighbour struct.
	 */
	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
	dev->addr_len = INFINIBAND_ALEN;
	dev->type = ARPHRD_INFINIBAND;
	dev->tx_queue_len = IPOIB_TX_RING_SIZE * 2;
	dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;

	/* MTU will be reset when mcast join happens */
	dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
	priv->mcast_mtu = priv->admin_mtu = dev->mtu;

	memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);

	netif_carrier_off(dev);

	SET_MODULE_OWNER(dev);

	priv->dev = dev;

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	init_MUTEX(&priv->mcast_mutex);
	init_MUTEX(&priv->vlan_mutex);

	INIT_LIST_HEAD(&priv->path_list);
	INIT_LIST_HEAD(&priv->child_intfs);
	INIT_LIST_HEAD(&priv->dead_ahs);
	INIT_LIST_HEAD(&priv->multicast_list);

	/* All deferred work items receive priv->dev as their argument. */
	INIT_WORK(&priv->pkey_task, ipoib_pkey_poll, priv->dev);
	INIT_WORK(&priv->mcast_task, ipoib_mcast_join_task, priv->dev);
	INIT_WORK(&priv->flush_task, ipoib_ib_dev_flush, priv->dev);
	INIT_WORK(&priv->restart_task, ipoib_mcast_restart_task, priv->dev);
	INIT_WORK(&priv->ah_reap_task, ipoib_reap_ah, priv->dev);
}
855
856struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
857{
858 struct net_device *dev;
859
860 dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
861 ipoib_setup);
862 if (!dev)
863 return NULL;
864
865 return netdev_priv(dev);
866}
867
/* sysfs "pkey" attribute (read-only): report the interface's P_Key as 0xNNNN. */
static ssize_t show_pkey(struct class_device *cdev, char *buf)
{
	struct ipoib_dev_priv *priv =
		netdev_priv(container_of(cdev, struct net_device, class_dev));

	return sprintf(buf, "0x%04x\n", priv->pkey);
}
static CLASS_DEVICE_ATTR(pkey, S_IRUGO, show_pkey, NULL);
876
877static ssize_t create_child(struct class_device *cdev,
878 const char *buf, size_t count)
879{
880 int pkey;
881 int ret;
882
883 if (sscanf(buf, "%i", &pkey) != 1)
884 return -EINVAL;
885
886 if (pkey < 0 || pkey > 0xffff)
887 return -EINVAL;
888
889 ret = ipoib_vlan_add(container_of(cdev, struct net_device, class_dev),
890 pkey);
891
892 return ret ? ret : count;
893}
894static CLASS_DEVICE_ATTR(create_child, S_IWUGO, NULL, create_child);
895
896static ssize_t delete_child(struct class_device *cdev,
897 const char *buf, size_t count)
898{
899 int pkey;
900 int ret;
901
902 if (sscanf(buf, "%i", &pkey) != 1)
903 return -EINVAL;
904
905 if (pkey < 0 || pkey > 0xffff)
906 return -EINVAL;
907
908 ret = ipoib_vlan_delete(container_of(cdev, struct net_device, class_dev),
909 pkey);
910
911 return ret ? ret : count;
912
913}
914static CLASS_DEVICE_ATTR(delete_child, S_IWUGO, NULL, delete_child);
915
/* Create the sysfs "pkey" attribute for an IPoIB net_device. */
int ipoib_add_pkey_attr(struct net_device *dev)
{
	return class_device_create_file(&dev->class_dev,
				       &class_device_attr_pkey);
}
921
922static struct net_device *ipoib_add_port(const char *format,
923 struct ib_device *hca, u8 port)
924{
925 struct ipoib_dev_priv *priv;
926 int result = -ENOMEM;
927
928 priv = ipoib_intf_alloc(format);
929 if (!priv)
930 goto alloc_mem_failed;
931
932 SET_NETDEV_DEV(priv->dev, hca->dma_device);
933
934 result = ib_query_pkey(hca, port, 0, &priv->pkey);
935 if (result) {
936 printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
937 hca->name, port, result);
938 goto alloc_mem_failed;
939 }
940
941 priv->dev->broadcast[8] = priv->pkey >> 8;
942 priv->dev->broadcast[9] = priv->pkey & 0xff;
943
944 result = ib_query_gid(hca, port, 0, &priv->local_gid);
945 if (result) {
946 printk(KERN_WARNING "%s: ib_query_gid port %d failed (ret = %d)\n",
947 hca->name, port, result);
948 goto alloc_mem_failed;
949 } else
950 memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));
951
952
953 result = ipoib_dev_init(priv->dev, hca, port);
954 if (result < 0) {
955 printk(KERN_WARNING "%s: failed to initialize port %d (ret = %d)\n",
956 hca->name, port, result);
957 goto device_init_failed;
958 }
959
960 INIT_IB_EVENT_HANDLER(&priv->event_handler,
961 priv->ca, ipoib_event);
962 result = ib_register_event_handler(&priv->event_handler);
963 if (result < 0) {
964 printk(KERN_WARNING "%s: ib_register_event_handler failed for "
965 "port %d (ret = %d)\n",
966 hca->name, port, result);
967 goto event_failed;
968 }
969
970 result = register_netdev(priv->dev);
971 if (result) {
972 printk(KERN_WARNING "%s: couldn't register ipoib port %d; error %d\n",
973 hca->name, port, result);
974 goto register_failed;
975 }
976
977 if (ipoib_create_debug_file(priv->dev))
978 goto debug_failed;
979
980 if (ipoib_add_pkey_attr(priv->dev))
981 goto sysfs_failed;
982 if (class_device_create_file(&priv->dev->class_dev,
983 &class_device_attr_create_child))
984 goto sysfs_failed;
985 if (class_device_create_file(&priv->dev->class_dev,
986 &class_device_attr_delete_child))
987 goto sysfs_failed;
988
989 return priv->dev;
990
991sysfs_failed:
992 ipoib_delete_debug_file(priv->dev);
993
994debug_failed:
995 unregister_netdev(priv->dev);
996
997register_failed:
998 ib_unregister_event_handler(&priv->event_handler);
999
1000event_failed:
1001 ipoib_dev_cleanup(priv->dev);
1002
1003device_init_failed:
1004 free_netdev(priv->dev);
1005
1006alloc_mem_failed:
1007 return ERR_PTR(result);
1008}
1009
/*
 * IB client "add" callback: create one IPoIB interface per physical
 * port of the new HCA (switches expose only port 0) and store the list
 * of created interfaces as the client data for this device.
 */
static void ipoib_add_one(struct ib_device *device)
{
	struct list_head *dev_list;
	struct net_device *dev;
	struct ipoib_dev_priv *priv;
	int s, e, p;

	dev_list = kmalloc(sizeof *dev_list, GFP_KERNEL);
	if (!dev_list)
		return;

	INIT_LIST_HEAD(dev_list);

	if (device->node_type == IB_NODE_SWITCH) {
		/* Switches have a single management port, numbered 0 */
		s = 0;
		e = 0;
	} else {
		s = 1;
		e = device->phys_port_cnt;
	}

	for (p = s; p <= e; ++p) {
		/* Failed ports are simply skipped; the rest still come up */
		dev = ipoib_add_port("ib%d", device, p);
		if (!IS_ERR(dev)) {
			priv = netdev_priv(dev);
			list_add_tail(&priv->list, dev_list);
		}
	}

	ib_set_client_data(device, &ipoib_client, dev_list);
}
1041
1042static void ipoib_remove_one(struct ib_device *device)
1043{
1044 struct ipoib_dev_priv *priv, *tmp;
1045 struct list_head *dev_list;
1046
1047 dev_list = ib_get_client_data(device, &ipoib_client);
1048
1049 list_for_each_entry_safe(priv, tmp, dev_list, list) {
1050 ib_unregister_event_handler(&priv->event_handler);
1051
1052 unregister_netdev(priv->dev);
1053 ipoib_dev_cleanup(priv->dev);
1054 free_netdev(priv->dev);
1055 }
1056}
1057
1058static int __init ipoib_init_module(void)
1059{
1060 int ret;
1061
1062 ret = ipoib_register_debugfs();
1063 if (ret)
1064 return ret;
1065
1066 /*
1067 * We create our own workqueue mainly because we want to be
1068 * able to flush it when devices are being removed. We can't
1069 * use schedule_work()/flush_scheduled_work() because both
1070 * unregister_netdev() and linkwatch_event take the rtnl lock,
1071 * so flush_scheduled_work() can deadlock during device
1072 * removal.
1073 */
1074 ipoib_workqueue = create_singlethread_workqueue("ipoib");
1075 if (!ipoib_workqueue) {
1076 ret = -ENOMEM;
1077 goto err_fs;
1078 }
1079
1080 ret = ib_register_client(&ipoib_client);
1081 if (ret)
1082 goto err_wq;
1083
1084 return 0;
1085
1086err_fs:
1087 ipoib_unregister_debugfs();
1088
1089err_wq:
1090 destroy_workqueue(ipoib_workqueue);
1091
1092 return ret;
1093}
1094
/* Module unload: undo everything set up in ipoib_init_module(). */
static void __exit ipoib_cleanup_module(void)
{
	ipoib_unregister_debugfs();
	ib_unregister_client(&ipoib_client);
	destroy_workqueue(ipoib_workqueue);
}

module_init(ipoib_init_module);
module_exit(ipoib_cleanup_module);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
new file mode 100644
index 000000000000..f46932dc81c9
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -0,0 +1,991 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_multicast.c 1362 2004-12-18 15:56:29Z roland $
33 */
34
35#include <linux/skbuff.h>
36#include <linux/rtnetlink.h>
37#include <linux/ip.h>
38#include <linux/in.h>
39#include <linux/igmp.h>
40#include <linux/inetdevice.h>
41#include <linux/delay.h>
42#include <linux/completion.h>
43
44#include "ipoib.h"
45
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
/* Module parameter gating the ipoib_dbg_mcast() trace output below. */
static int mcast_debug_level;

module_param(mcast_debug_level, int, 0644);
MODULE_PARM_DESC(mcast_debug_level,
		 "Enable multicast debug tracing if > 0");
#endif

/* Serializes starting/stopping the join task and its backoff requeues */
static DECLARE_MUTEX(mcast_mutex);
55
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
struct ipoib_mcast {
	struct ib_sa_mcmember_rec mcmember;	/* SA member record from the join reply */
	struct ipoib_ah *ah;		/* address handle; NULL until join completes */

	struct rb_node rb_node;		/* node in priv->multicast_tree, keyed by MGID */
	struct list_head list;		/* entry in priv->multicast_list */
	struct completion done;		/* completed when the SA query finishes */

	int query_id;			/* id for ib_sa_cancel_query() */
	struct ib_sa_query *query;	/* in-flight SA query, NULL if none */

	unsigned long created;		/* jiffies timestamp, exported via debugfs */
	unsigned long backoff;		/* rejoin backoff in seconds, doubled per failure */

	unsigned long flags;		/* IPOIB_MCAST_FLAG_* bits */
	unsigned char logcount;		/* caps repeated join-failure log messages */

	struct list_head neigh_list;	/* ipoib_neigh entries using this group's AH */

	struct sk_buff_head pkt_queue;	/* packets queued until the join completes */

	struct net_device *dev;
};
80
/* Snapshot cursor used by debugfs to walk the multicast tree in MGID order */
struct ipoib_mcast_iter {
	struct net_device *dev;
	union ib_gid       mgid;	/* MGID of the group last visited */
	unsigned long      created;
	unsigned int       queuelen;	/* packets pending on the group */
	unsigned int       complete;	/* nonzero once the group has an AH */
	unsigned int       send_only;
};
89
/*
 * Tear down a multicast group entry: detach the neighbours pointing at
 * its AH, drop the AH references, and discard any packets still queued
 * waiting for the join to complete.
 */
static void ipoib_mcast_free(struct ipoib_mcast *mcast)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_neigh *neigh, *tmp;
	unsigned long flags;
	LIST_HEAD(ah_list);
	struct ipoib_ah *ah, *tah;

	ipoib_dbg_mcast(netdev_priv(dev),
			"deleting multicast group " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Unlink the neighbours while holding priv->lock, but collect
	 * their AHs on a private list so the references can be dropped
	 * after the lock is released.
	 */
	list_for_each_entry_safe(neigh, tmp, &mcast->neigh_list, list) {
		if (neigh->ah)
			list_add_tail(&neigh->ah->list, &ah_list);
		*to_ipoib_neigh(neigh->neighbour) = NULL;
		neigh->neighbour->ops->destructor = NULL;
		kfree(neigh);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	list_for_each_entry_safe(ah, tah, &ah_list, list)
		ipoib_put_ah(ah);

	if (mcast->ah)
		ipoib_put_ah(mcast->ah);

	/* Drop any packets that were queued pending a successful join */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		skb->dev = dev;
		dev_kfree_skb_any(skb);
	}

	kfree(mcast);
}
130
131static struct ipoib_mcast *ipoib_mcast_alloc(struct net_device *dev,
132 int can_sleep)
133{
134 struct ipoib_mcast *mcast;
135
136 mcast = kmalloc(sizeof (*mcast), can_sleep ? GFP_KERNEL : GFP_ATOMIC);
137 if (!mcast)
138 return NULL;
139
140 memset(mcast, 0, sizeof (*mcast));
141
142 init_completion(&mcast->done);
143
144 mcast->dev = dev;
145 mcast->created = jiffies;
146 mcast->backoff = HZ;
147 mcast->logcount = 0;
148
149 INIT_LIST_HEAD(&mcast->list);
150 INIT_LIST_HEAD(&mcast->neigh_list);
151 skb_queue_head_init(&mcast->pkt_queue);
152
153 mcast->ah = NULL;
154 mcast->query = NULL;
155
156 return mcast;
157}
158
159static struct ipoib_mcast *__ipoib_mcast_find(struct net_device *dev, union ib_gid *mgid)
160{
161 struct ipoib_dev_priv *priv = netdev_priv(dev);
162 struct rb_node *n = priv->multicast_tree.rb_node;
163
164 while (n) {
165 struct ipoib_mcast *mcast;
166 int ret;
167
168 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
169
170 ret = memcmp(mgid->raw, mcast->mcmember.mgid.raw,
171 sizeof (union ib_gid));
172 if (ret < 0)
173 n = n->rb_left;
174 else if (ret > 0)
175 n = n->rb_right;
176 else
177 return mcast;
178 }
179
180 return NULL;
181}
182
183static int __ipoib_mcast_add(struct net_device *dev, struct ipoib_mcast *mcast)
184{
185 struct ipoib_dev_priv *priv = netdev_priv(dev);
186 struct rb_node **n = &priv->multicast_tree.rb_node, *pn = NULL;
187
188 while (*n) {
189 struct ipoib_mcast *tmcast;
190 int ret;
191
192 pn = *n;
193 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node);
194
195 ret = memcmp(mcast->mcmember.mgid.raw, tmcast->mcmember.mgid.raw,
196 sizeof (union ib_gid));
197 if (ret < 0)
198 n = &pn->rb_left;
199 else if (ret > 0)
200 n = &pn->rb_right;
201 else
202 return -EEXIST;
203 }
204
205 rb_link_node(&mcast->rb_node, pn, n);
206 rb_insert_color(&mcast->rb_node, &priv->multicast_tree);
207
208 return 0;
209}
210
/*
 * Complete a successful multicast join: record the member record,
 * cache the Q_Key for the broadcast group, attach the QP (for
 * full-member groups), build the address handle, and transmit any
 * packets that queued up while the join was in progress.
 */
static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
				   struct ib_sa_mcmember_rec *mcmember)
{
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	mcast->mcmember = *mcmember;

	/* Set the cached Q_Key before we attach if it's the broadcast group */
	if (!memcmp(mcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		    sizeof (union ib_gid))) {
		priv->qkey = be32_to_cpu(priv->broadcast->mcmember.qkey);
		priv->tx_wr.wr.ud.remote_qkey = priv->qkey;
	}

	if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
		/* Send-only groups are never attached; receive requires a QP attach */
		if (test_and_set_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
			ipoib_warn(priv, "multicast group " IPOIB_GID_FMT
				   " already attached\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			return 0;
		}

		ret = ipoib_mcast_attach(dev, be16_to_cpu(mcast->mcmember.mlid),
					 &mcast->mcmember.mgid);
		if (ret < 0) {
			ipoib_warn(priv, "couldn't attach QP to multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));

			clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags);
			return ret;
		}
	}

	{
		/* Build the address vector for sends to this group */
		struct ib_ah_attr av = {
			.dlid = be16_to_cpu(mcast->mcmember.mlid),
			.port_num = priv->port,
			.sl = mcast->mcmember.sl,
			.ah_flags = IB_AH_GRH,
			.grh = {
				.flow_label = be32_to_cpu(mcast->mcmember.flow_label),
				.hop_limit = mcast->mcmember.hop_limit,
				.sgid_index = 0,
				.traffic_class = mcast->mcmember.traffic_class
			}
		};

		av.grh.dgid = mcast->mcmember.mgid;

		/* Scale static_rate relative to our local link rate */
		if (ib_sa_rate_enum_to_int(mcast->mcmember.rate) > 0)
			av.static_rate = (2 * priv->local_rate -
					  ib_sa_rate_enum_to_int(mcast->mcmember.rate) - 1) /
				(priv->local_rate ? priv->local_rate : 1);

		ipoib_dbg_mcast(priv, "static_rate %d for local port %dX, mcmember %dX\n",
				av.static_rate, priv->local_rate,
				ib_sa_rate_enum_to_int(mcast->mcmember.rate));

		mcast->ah = ipoib_create_ah(dev, priv->pd, &av);
		if (!mcast->ah) {
			ipoib_warn(priv, "ib_address_create failed\n");
		} else {
			ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
					" AV %p, LID 0x%04x, SL %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					mcast->ah->ah,
					be16_to_cpu(mcast->mcmember.mlid),
					mcast->mcmember.sl);
		}
	}

	/* actually send any queued packets */
	while (!skb_queue_empty(&mcast->pkt_queue)) {
		struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

		skb->dev = dev;

		if (!skb->dst || !skb->dst->neighbour) {
			/* put pseudoheader back on for next time */
			skb_push(skb, sizeof (struct ipoib_pseudoheader));
		}

		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
	}

	return 0;
}
303
/*
 * SA query callback for a send-only join.  On success, finish the join;
 * on failure, drop the queued packets and clear the busy flag so a
 * later send can retry.  Always signals mcast->done for waiters.
 */
static void
ipoib_mcast_sendonly_join_complete(int status,
				   struct ib_sa_mcmember_rec *mcmember,
				   void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;

	if (!status)
		ipoib_mcast_join_finish(mcast, mcmember);
	else {
		/* Rate-limit the failure messages per group */
		if (mcast->logcount++ < 20)
			ipoib_dbg_mcast(netdev_priv(dev), "multicast join failed for "
					IPOIB_GID_FMT ", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid), status);

		/* Flush out any queued packets */
		while (!skb_queue_empty(&mcast->pkt_queue)) {
			struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);

			skb->dev = dev;

			dev_kfree_skb_any(skb);
		}

		/* Clear the busy flag so we try again */
		clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
	}

	complete(&mcast->done);
}
335
336static int ipoib_mcast_sendonly_join(struct ipoib_mcast *mcast)
337{
338 struct net_device *dev = mcast->dev;
339 struct ipoib_dev_priv *priv = netdev_priv(dev);
340 struct ib_sa_mcmember_rec rec = {
341#if 0 /* Some SMs don't support send-only yet */
342 .join_state = 4
343#else
344 .join_state = 1
345#endif
346 };
347 int ret = 0;
348
349 if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)) {
350 ipoib_dbg_mcast(priv, "device shutting down, no multicast joins\n");
351 return -ENODEV;
352 }
353
354 if (test_and_set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)) {
355 ipoib_dbg_mcast(priv, "multicast entry busy, skipping\n");
356 return -EBUSY;
357 }
358
359 rec.mgid = mcast->mcmember.mgid;
360 rec.port_gid = priv->local_gid;
361 rec.pkey = be16_to_cpu(priv->pkey);
362
363 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec,
364 IB_SA_MCMEMBER_REC_MGID |
365 IB_SA_MCMEMBER_REC_PORT_GID |
366 IB_SA_MCMEMBER_REC_PKEY |
367 IB_SA_MCMEMBER_REC_JOIN_STATE,
368 1000, GFP_ATOMIC,
369 ipoib_mcast_sendonly_join_complete,
370 mcast, &mcast->query);
371 if (ret < 0) {
372 ipoib_warn(priv, "ib_sa_mcmember_rec_set failed (ret = %d)\n",
373 ret);
374 } else {
375 ipoib_dbg_mcast(priv, "no multicast record for " IPOIB_GID_FMT
376 ", starting join\n",
377 IPOIB_GID_ARG(mcast->mcmember.mgid));
378
379 mcast->query_id = ret;
380 }
381
382 return ret;
383}
384
/*
 * SA query callback for full-member joins.  On success, reset the
 * backoff and requeue the join task to process the next group; on
 * failure, double the backoff and reschedule a retry.  mcast->done is
 * completed whenever no further retry will be queued.
 */
static void ipoib_mcast_join_complete(int status,
				      struct ib_sa_mcmember_rec *mcmember,
				      void *mcast_ptr)
{
	struct ipoib_mcast *mcast = mcast_ptr;
	struct net_device *dev = mcast->dev;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "join completion for " IPOIB_GID_FMT
			" (status %d)\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid), status);

	if (!status && !ipoib_mcast_join_finish(mcast, mcmember)) {
		mcast->backoff = HZ;
		down(&mcast_mutex);
		if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
			queue_work(ipoib_workqueue, &priv->mcast_task);
		up(&mcast_mutex);
		complete(&mcast->done);
		return;
	}

	/* -EINTR means the query was cancelled (e.g. by stop_thread) */
	if (status == -EINTR) {
		complete(&mcast->done);
		return;
	}

	if (status && mcast->logcount++ < 20) {
		/* Timeouts are expected transients: debug only, not a warning */
		if (status == -ETIMEDOUT || status == -EINTR) {
			ipoib_dbg_mcast(priv, "multicast join failed for " IPOIB_GID_FMT
					", status %d\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid),
					status);
		} else {
			ipoib_warn(priv, "multicast join failed for "
				   IPOIB_GID_FMT ", status %d\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid),
				   status);
		}
	}

	/* Exponential backoff (in seconds), capped */
	mcast->backoff *= 2;
	if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
		mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;

	mcast->query = NULL;

	down(&mcast_mutex);
	if (test_bit(IPOIB_MCAST_RUN, &priv->flags)) {
		if (status == -ETIMEDOUT)
			queue_work(ipoib_workqueue, &priv->mcast_task);
		else
			queue_delayed_work(ipoib_workqueue, &priv->mcast_task,
					   mcast->backoff * HZ);
	} else
		complete(&mcast->done);
	up(&mcast_mutex);

	return;
}
445
446static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast,
447 int create)
448{
449 struct ipoib_dev_priv *priv = netdev_priv(dev);
450 struct ib_sa_mcmember_rec rec = {
451 .join_state = 1
452 };
453 ib_sa_comp_mask comp_mask;
454 int ret = 0;
455
456 ipoib_dbg_mcast(priv, "joining MGID " IPOIB_GID_FMT "\n",
457 IPOIB_GID_ARG(mcast->mcmember.mgid));
458
459 rec.mgid = mcast->mcmember.mgid;
460 rec.port_gid = priv->local_gid;
461 rec.pkey = be16_to_cpu(priv->pkey);
462
463 comp_mask =
464 IB_SA_MCMEMBER_REC_MGID |
465 IB_SA_MCMEMBER_REC_PORT_GID |
466 IB_SA_MCMEMBER_REC_PKEY |
467 IB_SA_MCMEMBER_REC_JOIN_STATE;
468
469 if (create) {
470 comp_mask |=
471 IB_SA_MCMEMBER_REC_QKEY |
472 IB_SA_MCMEMBER_REC_SL |
473 IB_SA_MCMEMBER_REC_FLOW_LABEL |
474 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
475
476 rec.qkey = priv->broadcast->mcmember.qkey;
477 rec.sl = priv->broadcast->mcmember.sl;
478 rec.flow_label = priv->broadcast->mcmember.flow_label;
479 rec.traffic_class = priv->broadcast->mcmember.traffic_class;
480 }
481
482 ret = ib_sa_mcmember_rec_set(priv->ca, priv->port, &rec, comp_mask,
483 mcast->backoff * 1000, GFP_ATOMIC,
484 ipoib_mcast_join_complete,
485 mcast, &mcast->query);
486
487 if (ret < 0) {
488 ipoib_warn(priv, "ib_sa_mcmember_rec_set failed, status %d\n", ret);
489
490 mcast->backoff *= 2;
491 if (mcast->backoff > IPOIB_MAX_BACKOFF_SECONDS)
492 mcast->backoff = IPOIB_MAX_BACKOFF_SECONDS;
493
494 down(&mcast_mutex);
495 if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
496 queue_delayed_work(ipoib_workqueue,
497 &priv->mcast_task,
498 mcast->backoff);
499 up(&mcast_mutex);
500 } else
501 mcast->query_id = ret;
502}
503
/*
 * Workqueue handler that drives the join state machine: refresh our
 * local GID/LID/rate, make sure the broadcast group exists and is
 * joined first, then join the remaining groups one at a time (each
 * completion requeues this task).  When everything is joined, set the
 * MTU from the broadcast group and turn the carrier on.
 */
void ipoib_mcast_join_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	if (!test_bit(IPOIB_MCAST_RUN, &priv->flags))
		return;

	if (ib_query_gid(priv->ca, priv->port, 0, &priv->local_gid))
		ipoib_warn(priv, "ib_gid_entry_get() failed\n");
	else
		memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, sizeof (union ib_gid));

	{
		struct ib_port_attr attr;

		if (!ib_query_port(priv->ca, priv->port, &attr)) {
			priv->local_lid  = attr.lid;
			priv->local_rate = attr.active_speed *
				ib_width_enum_to_int(attr.active_width);
		} else
			ipoib_warn(priv, "ib_query_port failed\n");
	}

	if (!priv->broadcast) {
		priv->broadcast = ipoib_mcast_alloc(dev, 1);
		if (!priv->broadcast) {
			ipoib_warn(priv, "failed to allocate broadcast group\n");
			/* Retry in a second if allocation failed */
			down(&mcast_mutex);
			if (test_bit(IPOIB_MCAST_RUN, &priv->flags))
				queue_delayed_work(ipoib_workqueue,
						   &priv->mcast_task, HZ);
			up(&mcast_mutex);
			return;
		}

		/* Broadcast MGID comes from the device's hardware broadcast addr */
		memcpy(priv->broadcast->mcmember.mgid.raw, priv->dev->broadcast + 4,
		       sizeof (union ib_gid));

		spin_lock_irq(&priv->lock);
		__ipoib_mcast_add(dev, priv->broadcast);
		spin_unlock_irq(&priv->lock);
	}

	/* The broadcast group must be joined before any other group */
	if (!test_bit(IPOIB_MCAST_FLAG_ATTACHED, &priv->broadcast->flags)) {
		ipoib_mcast_join(dev, priv->broadcast, 0);
		return;
	}

	while (1) {
		struct ipoib_mcast *mcast = NULL;

		spin_lock_irq(&priv->lock);
		list_for_each_entry(mcast, &priv->multicast_list, list) {
			if (!test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags)
			    && !test_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags)) {
				/* Found the next unjoined group */
				break;
			}
		}
		spin_unlock_irq(&priv->lock);

		if (&mcast->list == &priv->multicast_list) {
			/* All done */
			break;
		}

		/* Join one group; its completion requeues this task */
		ipoib_mcast_join(dev, mcast, 1);
		return;
	}

	priv->mcast_mtu = ib_mtu_enum_to_int(priv->broadcast->mcmember.mtu) -
		IPOIB_ENCAP_LEN;
	dev->mtu = min(priv->mcast_mtu, priv->admin_mtu);

	ipoib_dbg_mcast(priv, "successfully joined all multicast groups\n");

	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	netif_carrier_on(dev);
}
585
/*
 * Kick off the multicast join task for @dev unless it is already
 * running (MCAST_RUN set).  Always returns 0.
 */
int ipoib_mcast_start_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	ipoib_dbg_mcast(priv, "starting multicast thread\n");

	down(&mcast_mutex);
	if (!test_and_set_bit(IPOIB_MCAST_RUN, &priv->flags))
		queue_work(ipoib_workqueue, &priv->mcast_task);
	up(&mcast_mutex);

	return 0;
}
599
/*
 * Stop the join task, flush the workqueue, then cancel any in-flight
 * SA queries and wait for their completions.  Always returns 0.
 */
int ipoib_mcast_stop_thread(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	ipoib_dbg_mcast(priv, "stopping multicast thread\n");

	down(&mcast_mutex);
	clear_bit(IPOIB_MCAST_RUN, &priv->flags);
	cancel_delayed_work(&priv->mcast_task);
	up(&mcast_mutex);

	/* Make sure a currently-running join task has finished */
	flush_workqueue(ipoib_workqueue);

	if (priv->broadcast && priv->broadcast->query) {
		ib_sa_cancel_query(priv->broadcast->query_id, priv->broadcast->query);
		priv->broadcast->query = NULL;
		ipoib_dbg_mcast(priv, "waiting for bcast\n");
		wait_for_completion(&priv->broadcast->done);
	}

	list_for_each_entry(mcast, &priv->multicast_list, list) {
		if (mcast->query) {
			ib_sa_cancel_query(mcast->query_id, mcast->query);
			mcast->query = NULL;
			ipoib_dbg_mcast(priv, "waiting for MGID " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));
			wait_for_completion(&mcast->done);
		}
	}

	return 0;
}
633
/*
 * Leave a multicast group: detach the QP and fire a best-effort
 * (no-callback) SA delete.  A no-op if the group was never attached.
 * Always returns 0.
 */
static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_sa_mcmember_rec rec = {
		.join_state = 1
	};
	int ret = 0;

	if (!test_and_clear_bit(IPOIB_MCAST_FLAG_ATTACHED, &mcast->flags))
		return 0;

	ipoib_dbg_mcast(priv, "leaving MGID " IPOIB_GID_FMT "\n",
			IPOIB_GID_ARG(mcast->mcmember.mgid));

	rec.mgid     = mcast->mcmember.mgid;
	rec.port_gid = priv->local_gid;
	rec.pkey     = be16_to_cpu(priv->pkey);

	/* Remove ourselves from the multicast group */
	ret = ipoib_mcast_detach(dev, be16_to_cpu(mcast->mcmember.mlid),
				 &mcast->mcmember.mgid);
	if (ret)
		ipoib_warn(priv, "ipoib_mcast_detach failed (result = %d)\n", ret);

	/*
	 * Just make one shot at leaving and don't wait for a reply;
	 * if we fail, too bad.
	 */
	ret = ib_sa_mcmember_rec_delete(priv->ca, priv->port, &rec,
					IB_SA_MCMEMBER_REC_MGID |
					IB_SA_MCMEMBER_REC_PORT_GID |
					IB_SA_MCMEMBER_REC_PKEY |
					IB_SA_MCMEMBER_REC_JOIN_STATE,
					0, GFP_ATOMIC, NULL,
					mcast, &mcast->query);
	if (ret < 0)
		ipoib_warn(priv, "ib_sa_mcmember_rec_delete failed "
			   "for leave (result = %d)\n", ret);

	return 0;
}
675
/*
 * Transmit @skb to multicast group @mgid, creating a send-only group
 * entry (and kicking off its join) if none exists yet.  Packets are
 * queued on the group until an address handle is available.
 * Called only from ipoib_start_xmit(), so priv->tx_lock is held.
 */
void ipoib_mcast_send(struct net_device *dev, union ib_gid *mgid,
		      struct sk_buff *skb)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_mcast *mcast;

	/*
	 * We can only be called from ipoib_start_xmit, so we're
	 * inside tx_lock -- no need to save/restore flags.
	 */
	spin_lock(&priv->lock);

	mcast = __ipoib_mcast_find(dev, mgid);
	if (!mcast) {
		/* Let's create a new send only group now */
		ipoib_dbg_mcast(priv, "setting up send only multicast group for "
				IPOIB_GID_FMT "\n", IPOIB_GID_ARG(*mgid));

		mcast = ipoib_mcast_alloc(dev, 0);
		if (!mcast) {
			ipoib_warn(priv, "unable to allocate memory for "
				   "multicast structure\n");
			dev_kfree_skb_any(skb);
			goto out;
		}

		set_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags);
		mcast->mcmember.mgid = *mgid;
		__ipoib_mcast_add(dev, mcast);
		list_add_tail(&mcast->list, &priv->multicast_list);
	}

	if (!mcast->ah) {
		/* Not joined yet: queue (bounded) and make sure a join is running */
		if (skb_queue_len(&mcast->pkt_queue) < IPOIB_MAX_MCAST_QUEUE)
			skb_queue_tail(&mcast->pkt_queue, skb);
		else
			dev_kfree_skb_any(skb);

		if (mcast->query)
			ipoib_dbg_mcast(priv, "no address vector, "
					"but multicast join already started\n");
		else if (test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags))
			ipoib_mcast_sendonly_join(mcast);

		/*
		 * If lookup completes between here and out:, don't
		 * want to send packet twice.
		 */
		mcast = NULL;
	}

out:
	if (mcast && mcast->ah) {
		/* Cache the group's AH on the neighbour for the fast path */
		if (skb->dst            &&
		    skb->dst->neighbour &&
		    !*to_ipoib_neigh(skb->dst->neighbour)) {
			struct ipoib_neigh *neigh = kmalloc(sizeof *neigh, GFP_ATOMIC);

			if (neigh) {
				kref_get(&mcast->ah->ref);
				neigh->ah       = mcast->ah;
				neigh->neighbour = skb->dst->neighbour;
				*to_ipoib_neigh(skb->dst->neighbour) = neigh;
				list_add_tail(&neigh->list, &mcast->neigh_list);
			}
		}

		ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
	}

	spin_unlock(&priv->lock);
}
748
/*
 * Replace every multicast group entry (including broadcast) with a
 * freshly-allocated, unjoined copy carrying the same MGID, and leave +
 * free the old entries.  The swap is done under priv->lock; the
 * leave/free (which can sleep) happens afterwards on a private list.
 */
void ipoib_mcast_dev_flush(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	LIST_HEAD(remove_list);
	struct ipoib_mcast *mcast, *tmcast, *nmcast;
	unsigned long flags;

	ipoib_dbg_mcast(priv, "flushing multicast list\n");

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			/* Only the send-only flag carries over to the new entry */
			nmcast->flags =
				mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY);

			nmcast->mcmember.mgid = mcast->mcmember.mgid;

			/* Add the new group in before the to-be-destroyed group */
			list_add_tail(&nmcast->list, &mcast->list);
			list_del_init(&mcast->list);

			rb_replace_node(&mcast->rb_node, &nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&mcast->list, &remove_list);
		} else {
			ipoib_warn(priv, "could not reallocate multicast group "
				   IPOIB_GID_FMT "\n",
				   IPOIB_GID_ARG(mcast->mcmember.mgid));
		}
	}

	if (priv->broadcast) {
		nmcast = ipoib_mcast_alloc(dev, 0);
		if (nmcast) {
			nmcast->mcmember.mgid = priv->broadcast->mcmember.mgid;

			rb_replace_node(&priv->broadcast->rb_node,
					&nmcast->rb_node,
					&priv->multicast_tree);

			list_add_tail(&priv->broadcast->list, &remove_list);
		}

		/* NULL if the reallocation failed */
		priv->broadcast = nmcast;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* Leave/free outside the lock: these paths can sleep */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(dev, mcast);
		ipoib_mcast_free(mcast);
	}
}
804
/*
 * Drop the broadcast group when the device goes down; the join task
 * recreates it the next time the device comes up.  Only the rb_erase
 * needs priv->lock; the leave/free can sleep and runs unlocked.
 */
void ipoib_mcast_dev_down(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	unsigned long flags;

	/* Delete broadcast since it will be recreated */
	if (priv->broadcast) {
		ipoib_dbg_mcast(priv, "deleting broadcast group\n");

		spin_lock_irqsave(&priv->lock, flags);
		rb_erase(&priv->broadcast->rb_node, &priv->multicast_tree);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_mcast_leave(dev, priv->broadcast);
		ipoib_mcast_free(priv->broadcast);
		priv->broadcast = NULL;
	}
}
822
/*
 * Workqueue handler run after the device's multicast address list
 * changes: reconcile our group entries against dev->mc_list, creating
 * entries for new addresses (replacing any send-only entries for the
 * same MGID) and removing entries for addresses that disappeared, then
 * restart the join task if the interface is administratively up.
 */
void ipoib_mcast_restart_task(void *dev_ptr)
{
	struct net_device *dev = dev_ptr;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct dev_mc_list *mclist;
	struct ipoib_mcast *mcast, *tmcast;
	LIST_HEAD(remove_list);
	unsigned long flags;

	ipoib_dbg_mcast(priv, "restarting multicast task\n");

	/* Quiesce any in-flight joins before walking the lists */
	ipoib_mcast_stop_thread(dev);

	spin_lock_irqsave(&priv->lock, flags);

	/*
	 * Unfortunately, the networking core only gives us a list of all of
	 * the multicast hardware addresses. We need to figure out which ones
	 * are new and which ones have been removed
	 */

	/* Clear out the found flag */
	list_for_each_entry(mcast, &priv->multicast_list, list)
		clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);

	/* Mark all of the entries that are found or don't exist */
	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
		union ib_gid mgid;

		/* MGID is bytes 4.. of the IPoIB hardware multicast address */
		memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);

		/* Add in the P_Key */
		mgid.raw[4] = (priv->pkey >> 8) & 0xff;
		mgid.raw[5] = priv->pkey & 0xff;

		mcast = __ipoib_mcast_find(dev, &mgid);
		if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			struct ipoib_mcast *nmcast;

			/* Not found or send-only group, let's add a new entry */
			ipoib_dbg_mcast(priv, "adding multicast entry for mgid "
					IPOIB_GID_FMT "\n", IPOIB_GID_ARG(mgid));

			nmcast = ipoib_mcast_alloc(dev, 0);
			if (!nmcast) {
				ipoib_warn(priv, "unable to allocate memory for multicast structure\n");
				continue;
			}

			set_bit(IPOIB_MCAST_FLAG_FOUND, &nmcast->flags);

			nmcast->mcmember.mgid = mgid;

			if (mcast) {
				/* Destroy the send only entry */
				list_del(&mcast->list);
				list_add_tail(&mcast->list, &remove_list);

				rb_replace_node(&mcast->rb_node,
						&nmcast->rb_node,
						&priv->multicast_tree);
			} else
				__ipoib_mcast_add(dev, nmcast);

			list_add_tail(&nmcast->list, &priv->multicast_list);
		}

		if (mcast)
			set_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
	}

	/* Remove all of the entries don't exist anymore */
	list_for_each_entry_safe(mcast, tmcast, &priv->multicast_list, list) {
		if (!test_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags) &&
		    !test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
			ipoib_dbg_mcast(priv, "deleting multicast group " IPOIB_GID_FMT "\n",
					IPOIB_GID_ARG(mcast->mcmember.mgid));

			rb_erase(&mcast->rb_node, &priv->multicast_tree);

			/* Move to the remove list */
			list_del(&mcast->list);
			list_add_tail(&mcast->list, &remove_list);
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	/* We have to cancel outside of the spinlock */
	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
		ipoib_mcast_leave(mcast->dev, mcast);
		ipoib_mcast_free(mcast);
	}

	if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		ipoib_mcast_start_thread(dev);
}
919
920struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
921{
922 struct ipoib_mcast_iter *iter;
923
924 iter = kmalloc(sizeof *iter, GFP_KERNEL);
925 if (!iter)
926 return NULL;
927
928 iter->dev = dev;
929 memset(iter->mgid.raw, 0, sizeof iter->mgid);
930
931 if (ipoib_mcast_iter_next(iter)) {
932 ipoib_mcast_iter_free(iter);
933 return NULL;
934 }
935
936 return iter;
937}
938
/* Release an iterator obtained from ipoib_mcast_iter_init(). */
void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter)
{
	kfree(iter);
}
943
944int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
945{
946 struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
947 struct rb_node *n;
948 struct ipoib_mcast *mcast;
949 int ret = 1;
950
951 spin_lock_irq(&priv->lock);
952
953 n = rb_first(&priv->multicast_tree);
954
955 while (n) {
956 mcast = rb_entry(n, struct ipoib_mcast, rb_node);
957
958 if (memcmp(iter->mgid.raw, mcast->mcmember.mgid.raw,
959 sizeof (union ib_gid)) < 0) {
960 iter->mgid = mcast->mcmember.mgid;
961 iter->created = mcast->created;
962 iter->queuelen = skb_queue_len(&mcast->pkt_queue);
963 iter->complete = !!mcast->ah;
964 iter->send_only = !!(mcast->flags & (1 << IPOIB_MCAST_FLAG_SENDONLY));
965
966 ret = 0;
967
968 break;
969 }
970
971 n = rb_next(n);
972 }
973
974 spin_unlock_irq(&priv->lock);
975
976 return ret;
977}
978
/* Copy out the state snapshotted by the last ipoib_mcast_iter_next(). */
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
			   union ib_gid *mgid,
			   unsigned long *created,
			   unsigned int *queuelen,
			   unsigned int *complete,
			   unsigned int *send_only)
{
	*mgid      = iter->mgid;
	*created   = iter->created;
	*queuelen  = iter->queuelen;
	*complete  = iter->complete;
	*send_only = iter->send_only;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
new file mode 100644
index 000000000000..4933edf062c2
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_verbs.c 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#include <ib_cache.h>
36
37#include "ipoib.h"
38
39int ipoib_mcast_attach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
40{
41 struct ipoib_dev_priv *priv = netdev_priv(dev);
42 struct ib_qp_attr *qp_attr;
43 int attr_mask;
44 int ret;
45 u16 pkey_index;
46
47 ret = -ENOMEM;
48 qp_attr = kmalloc(sizeof *qp_attr, GFP_KERNEL);
49 if (!qp_attr)
50 goto out;
51
52 if (ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
53 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
54 ret = -ENXIO;
55 goto out;
56 }
57 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
58
59 /* set correct QKey for QP */
60 qp_attr->qkey = priv->qkey;
61 attr_mask = IB_QP_QKEY;
62 ret = ib_modify_qp(priv->qp, qp_attr, attr_mask);
63 if (ret) {
64 ipoib_warn(priv, "failed to modify QP, ret = %d\n", ret);
65 goto out;
66 }
67
68 /* attach QP to multicast group */
69 down(&priv->mcast_mutex);
70 ret = ib_attach_mcast(priv->qp, mgid, mlid);
71 up(&priv->mcast_mutex);
72 if (ret)
73 ipoib_warn(priv, "failed to attach to multicast group, ret = %d\n", ret);
74
75out:
76 kfree(qp_attr);
77 return ret;
78}
79
80int ipoib_mcast_detach(struct net_device *dev, u16 mlid, union ib_gid *mgid)
81{
82 struct ipoib_dev_priv *priv = netdev_priv(dev);
83 int ret;
84
85 down(&priv->mcast_mutex);
86 ret = ib_detach_mcast(priv->qp, mgid, mlid);
87 up(&priv->mcast_mutex);
88 if (ret)
89 ipoib_warn(priv, "ib_detach_mcast failed (result = %d)\n", ret);
90
91 return ret;
92}
93
94int ipoib_qp_create(struct net_device *dev)
95{
96 struct ipoib_dev_priv *priv = netdev_priv(dev);
97 int ret;
98 u16 pkey_index;
99 struct ib_qp_attr qp_attr;
100 int attr_mask;
101
102 /*
103 * Search through the port P_Key table for the requested pkey value.
104 * The port has to be assigned to the respective IB partition in
105 * advance.
106 */
107 ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index);
108 if (ret) {
109 clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
110 return ret;
111 }
112 set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
113
114 qp_attr.qp_state = IB_QPS_INIT;
115 qp_attr.qkey = 0;
116 qp_attr.port_num = priv->port;
117 qp_attr.pkey_index = pkey_index;
118 attr_mask =
119 IB_QP_QKEY |
120 IB_QP_PORT |
121 IB_QP_PKEY_INDEX |
122 IB_QP_STATE;
123 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
124 if (ret) {
125 ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
126 goto out_fail;
127 }
128
129 qp_attr.qp_state = IB_QPS_RTR;
130 /* Can't set this in a INIT->RTR transition */
131 attr_mask &= ~IB_QP_PORT;
132 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
133 if (ret) {
134 ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
135 goto out_fail;
136 }
137
138 qp_attr.qp_state = IB_QPS_RTS;
139 qp_attr.sq_psn = 0;
140 attr_mask |= IB_QP_SQ_PSN;
141 attr_mask &= ~IB_QP_PKEY_INDEX;
142 ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
143 if (ret) {
144 ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
145 goto out_fail;
146 }
147
148 return 0;
149
150out_fail:
151 ib_destroy_qp(priv->qp);
152 priv->qp = NULL;
153
154 return -EINVAL;
155}
156
157int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
158{
159 struct ipoib_dev_priv *priv = netdev_priv(dev);
160 struct ib_qp_init_attr init_attr = {
161 .cap = {
162 .max_send_wr = IPOIB_TX_RING_SIZE,
163 .max_recv_wr = IPOIB_RX_RING_SIZE,
164 .max_send_sge = 1,
165 .max_recv_sge = 1
166 },
167 .sq_sig_type = IB_SIGNAL_ALL_WR,
168 .qp_type = IB_QPT_UD
169 };
170
171 priv->pd = ib_alloc_pd(priv->ca);
172 if (IS_ERR(priv->pd)) {
173 printk(KERN_WARNING "%s: failed to allocate PD\n", ca->name);
174 return -ENODEV;
175 }
176
177 priv->cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, dev,
178 IPOIB_TX_RING_SIZE + IPOIB_RX_RING_SIZE + 1);
179 if (IS_ERR(priv->cq)) {
180 printk(KERN_WARNING "%s: failed to create CQ\n", ca->name);
181 goto out_free_pd;
182 }
183
184 if (ib_req_notify_cq(priv->cq, IB_CQ_NEXT_COMP))
185 goto out_free_cq;
186
187 priv->mr = ib_get_dma_mr(priv->pd, IB_ACCESS_LOCAL_WRITE);
188 if (IS_ERR(priv->mr)) {
189 printk(KERN_WARNING "%s: ib_get_dma_mr failed\n", ca->name);
190 goto out_free_cq;
191 }
192
193 init_attr.send_cq = priv->cq;
194 init_attr.recv_cq = priv->cq,
195
196 priv->qp = ib_create_qp(priv->pd, &init_attr);
197 if (IS_ERR(priv->qp)) {
198 printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
199 goto out_free_mr;
200 }
201
202 priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
203 priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
204 priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff;
205
206 priv->tx_sge.lkey = priv->mr->lkey;
207
208 priv->tx_wr.opcode = IB_WR_SEND;
209 priv->tx_wr.sg_list = &priv->tx_sge;
210 priv->tx_wr.num_sge = 1;
211 priv->tx_wr.send_flags = IB_SEND_SIGNALED;
212
213 return 0;
214
215out_free_mr:
216 ib_dereg_mr(priv->mr);
217
218out_free_cq:
219 ib_destroy_cq(priv->cq);
220
221out_free_pd:
222 ib_dealloc_pd(priv->pd);
223 return -ENODEV;
224}
225
/*
 * Tear down the IB transport resources created by
 * ipoib_transport_dev_init(), in reverse dependency order:
 * QP first, then MR, CQ, and finally the PD.
 */
void ipoib_transport_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* The QP may already be gone, e.g. if ipoib_qp_create() failed
	 * and destroyed it. */
	if (priv->qp) {
		if (ib_destroy_qp(priv->qp))
			ipoib_warn(priv, "ib_qp_destroy failed\n");

		priv->qp = NULL;
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
	}

	if (ib_dereg_mr(priv->mr))
		ipoib_warn(priv, "ib_dereg_mr failed\n");

	if (ib_destroy_cq(priv->cq))
		ipoib_warn(priv, "ib_cq_destroy failed\n");

	if (ib_dealloc_pd(priv->pd))
		ipoib_warn(priv, "ib_dealloc_pd failed\n");
}
247
248void ipoib_event(struct ib_event_handler *handler,
249 struct ib_event *record)
250{
251 struct ipoib_dev_priv *priv =
252 container_of(handler, struct ipoib_dev_priv, event_handler);
253
254 if (record->event == IB_EVENT_PORT_ACTIVE ||
255 record->event == IB_EVENT_LID_CHANGE ||
256 record->event == IB_EVENT_SM_CHANGE) {
257 ipoib_dbg(priv, "Port active event\n");
258 schedule_work(&priv->flush_task);
259 }
260}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
new file mode 100644
index 000000000000..94b8ea812fef
--- /dev/null
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -0,0 +1,177 @@
1/*
2 * Copyright (c) 2004 Topspin Communications. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 * $Id: ipoib_vlan.c 1349 2004-12-16 21:09:43Z roland $
33 */
34
35#include <linux/version.h>
36#include <linux/module.h>
37
38#include <linux/init.h>
39#include <linux/slab.h>
40#include <linux/seq_file.h>
41
42#include <asm/uaccess.h>
43
44#include "ipoib.h"
45
46static ssize_t show_parent(struct class_device *class_dev, char *buf)
47{
48 struct net_device *dev =
49 container_of(class_dev, struct net_device, class_dev);
50 struct ipoib_dev_priv *priv = netdev_priv(dev);
51
52 return sprintf(buf, "%s\n", priv->parent->name);
53}
54static CLASS_DEVICE_ATTR(parent, S_IRUGO, show_parent, NULL);
55
56int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
57{
58 struct ipoib_dev_priv *ppriv, *priv;
59 char intf_name[IFNAMSIZ];
60 int result;
61
62 if (!capable(CAP_NET_ADMIN))
63 return -EPERM;
64
65 ppriv = netdev_priv(pdev);
66
67 down(&ppriv->vlan_mutex);
68
69 /*
70 * First ensure this isn't a duplicate. We check the parent device and
71 * then all of the child interfaces to make sure the Pkey doesn't match.
72 */
73 if (ppriv->pkey == pkey) {
74 result = -ENOTUNIQ;
75 goto err;
76 }
77
78 list_for_each_entry(priv, &ppriv->child_intfs, list) {
79 if (priv->pkey == pkey) {
80 result = -ENOTUNIQ;
81 goto err;
82 }
83 }
84
85 snprintf(intf_name, sizeof intf_name, "%s.%04x",
86 ppriv->dev->name, pkey);
87 priv = ipoib_intf_alloc(intf_name);
88 if (!priv) {
89 result = -ENOMEM;
90 goto err;
91 }
92
93 set_bit(IPOIB_FLAG_SUBINTERFACE, &priv->flags);
94
95 priv->pkey = pkey;
96
97 memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, INFINIBAND_ALEN);
98 priv->dev->broadcast[8] = pkey >> 8;
99 priv->dev->broadcast[9] = pkey & 0xff;
100
101 result = ipoib_dev_init(priv->dev, ppriv->ca, ppriv->port);
102 if (result < 0) {
103 ipoib_warn(ppriv, "failed to initialize subinterface: "
104 "device %s, port %d",
105 ppriv->ca->name, ppriv->port);
106 goto device_init_failed;
107 }
108
109 result = register_netdev(priv->dev);
110 if (result) {
111 ipoib_warn(priv, "failed to initialize; error %i", result);
112 goto register_failed;
113 }
114
115 priv->parent = ppriv->dev;
116
117 if (ipoib_create_debug_file(priv->dev))
118 goto debug_failed;
119
120 if (ipoib_add_pkey_attr(priv->dev))
121 goto sysfs_failed;
122
123 if (class_device_create_file(&priv->dev->class_dev,
124 &class_device_attr_parent))
125 goto sysfs_failed;
126
127 list_add_tail(&priv->list, &ppriv->child_intfs);
128
129 up(&ppriv->vlan_mutex);
130
131 return 0;
132
133sysfs_failed:
134 ipoib_delete_debug_file(priv->dev);
135
136debug_failed:
137 unregister_netdev(priv->dev);
138
139register_failed:
140 ipoib_dev_cleanup(priv->dev);
141
142device_init_failed:
143 free_netdev(priv->dev);
144
145err:
146 up(&ppriv->vlan_mutex);
147 return result;
148}
149
150int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
151{
152 struct ipoib_dev_priv *ppriv, *priv, *tpriv;
153 int ret = -ENOENT;
154
155 if (!capable(CAP_NET_ADMIN))
156 return -EPERM;
157
158 ppriv = netdev_priv(pdev);
159
160 down(&ppriv->vlan_mutex);
161 list_for_each_entry_safe(priv, tpriv, &ppriv->child_intfs, list) {
162 if (priv->pkey == pkey) {
163 unregister_netdev(priv->dev);
164 ipoib_dev_cleanup(priv->dev);
165
166 list_del(&priv->list);
167
168 kfree(priv);
169
170 ret = 0;
171 break;
172 }
173 }
174 up(&ppriv->vlan_mutex);
175
176 return ret;
177}