Diffstat (limited to 'drivers/infiniband/hw/mlx4/main.c')
-rw-r--r--  drivers/infiniband/hw/mlx4/main.c  553
1 file changed, 511 insertions(+), 42 deletions(-)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 4e94e360e43b..bf3e20cd0298 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -35,9 +35,14 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
 
 #include <rdma/ib_smi.h>
 #include <rdma/ib_user_verbs.h>
+#include <rdma/ib_addr.h>
 
 #include <linux/mlx4/driver.h>
 #include <linux/mlx4/cmd.h>
@@ -58,6 +63,15 @@ static const char mlx4_ib_version[] =
 	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
 	DRV_VERSION " (" DRV_RELDATE ")\n";
 
+struct update_gid_work {
+	struct work_struct	work;
+	union ib_gid		gids[128];
+	struct mlx4_ib_dev     *dev;
+	int			port;
+};
+
+static struct workqueue_struct *wq;
+
 static void init_query_mad(struct ib_smp *mad)
 {
 	mad->base_version = 1;
@@ -66,6 +80,8 @@ static void init_query_mad(struct ib_smp *mad)
 	mad->method	   = IB_MGMT_METHOD_GET;
 }
 
+static union ib_gid zgid;
+
 static int mlx4_ib_query_device(struct ib_device *ibdev,
 				struct ib_device_attr *props)
 {
@@ -135,7 +151,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 	props->max_srq		   = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
 	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
 	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
-	props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
+	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
 	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
 	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
 		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
@@ -154,28 +170,19 @@ out:
 	return err;
 }
 
-static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
-			      struct ib_port_attr *props)
+static enum rdma_link_layer
+mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 {
-	struct ib_smp *in_mad  = NULL;
-	struct ib_smp *out_mad = NULL;
-	int err = -ENOMEM;
-
-	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
-	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-	if (!in_mad || !out_mad)
-		goto out;
-
-	memset(props, 0, sizeof *props);
-
-	init_query_mad(in_mad);
-	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
-	in_mad->attr_mod = cpu_to_be32(port);
+	struct mlx4_dev *dev = to_mdev(device)->dev;
 
-	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-	if (err)
-		goto out;
+	return dev->caps.port_mask & (1 << (port_num - 1)) ?
+		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
+}
 
+static int ib_link_query_port(struct ib_device *ibdev, u8 port,
+			      struct ib_port_attr *props,
+			      struct ib_smp *out_mad)
+{
 	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
 	props->lmc		= out_mad->data[34] & 0x7;
 	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
@@ -196,6 +203,80 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
 	props->max_vl_num	= out_mad->data[37] >> 4;
 	props->init_type_reply	= out_mad->data[41] >> 4;
 
+	return 0;
+}
+
+static u8 state_to_phys_state(enum ib_port_state state)
+{
+	return state == IB_PORT_ACTIVE ? 5 : 3;
+}
+
+static int eth_link_query_port(struct ib_device *ibdev, u8 port,
+			       struct ib_port_attr *props,
+			       struct ib_smp *out_mad)
+{
+	struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+	struct net_device *ndev;
+	enum ib_mtu tmp;
+
+	props->active_width	= IB_WIDTH_4X;
+	props->active_speed	= 4;
+	props->port_cap_flags	= IB_PORT_CM_SUP;
+	props->gid_tbl_len	= to_mdev(ibdev)->dev->caps.gid_table_len[port];
+	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
+	props->pkey_tbl_len	= 1;
+	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
+	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
+	props->max_mtu		= IB_MTU_2048;
+	props->subnet_timeout	= 0;
+	props->max_vl_num	= out_mad->data[37] >> 4;
+	props->init_type_reply	= 0;
+	props->state		= IB_PORT_DOWN;
+	props->phys_state	= state_to_phys_state(props->state);
+	props->active_mtu	= IB_MTU_256;
+	spin_lock(&iboe->lock);
+	ndev = iboe->netdevs[port - 1];
+	if (!ndev)
+		goto out;
+
+	tmp = iboe_get_mtu(ndev->mtu);
+	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
+
+	props->state		= netif_running(ndev) && netif_oper_up(ndev) ?
+					IB_PORT_ACTIVE : IB_PORT_DOWN;
+	props->phys_state	= state_to_phys_state(props->state);
+
+out:
+	spin_unlock(&iboe->lock);
+	return 0;
+}
+
+static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
+			      struct ib_port_attr *props)
+{
+	struct ib_smp *in_mad  = NULL;
+	struct ib_smp *out_mad = NULL;
+	int err = -ENOMEM;
+
+	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
+	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+	if (!in_mad || !out_mad)
+		goto out;
+
+	memset(props, 0, sizeof *props);
+
+	init_query_mad(in_mad);
+	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
+	in_mad->attr_mod = cpu_to_be32(port);
+
+	err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
+	if (err)
+		goto out;
+
+	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
+		ib_link_query_port(ibdev, port, props, out_mad) :
+		eth_link_query_port(ibdev, port, props, out_mad);
+
 out:
 	kfree(in_mad);
 	kfree(out_mad);
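
The hunks above split port queries by link layer: mlx4_ib_port_link_layer() tests the
device's port_mask, and mlx4_ib_query_port() then routes to the MAD-based IB path or the
netdev-backed Ethernet path. A minimal consumer-side sketch -- assumed ULP code, not part
of this patch -- of checking the link layer through the core rdma_port_get_link_layer()
wrapper that the new get_link_layer callback feeds:

	#include <rdma/ib_verbs.h>

	/* Returns true when the port runs IBoE, i.e. the driver's new
	 * get_link_layer callback reports an Ethernet link layer. */
	static bool port_is_iboe(struct ib_device *ibdev, u8 port)
	{
		return rdma_port_get_link_layer(ibdev, port) ==
			IB_LINK_LAYER_ETHERNET;
	}
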
@@ -203,8 +284,8 @@ out:
 	return err;
 }
 
-static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
-			     union ib_gid *gid)
+static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+			       union ib_gid *gid)
 {
 	struct ib_smp *in_mad  = NULL;
 	struct ib_smp *out_mad = NULL;
@@ -241,6 +322,25 @@ out:
 	return err;
 }
 
+static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
+			  union ib_gid *gid)
+{
+	struct mlx4_ib_dev *dev = to_mdev(ibdev);
+
+	*gid = dev->iboe.gid_table[port - 1][index];
+
+	return 0;
+}
+
+static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
+			     union ib_gid *gid)
+{
+	if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
+		return __mlx4_ib_query_gid(ibdev, port, index, gid);
+	else
+		return iboe_query_gid(ibdev, port, index, gid);
+}
+
 static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			      u16 *pkey)
 {
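
For an Ethernet port, mlx4_ib_query_gid() now answers from the software gid_table that the
IBoE code maintains, so no MAD ever goes on the wire. A minimal caller-side sketch (assumed
consumer code, not part of this patch) going through the core verbs entry point:

	/* ib_query_gid() lands in mlx4_ib_query_gid() above; on an IBoE
	 * port it returns the cached iboe.gid_table entry directly. */
	static int read_port_gid0(struct ib_device *ibdev, u8 port,
				  union ib_gid *gid)
	{
		return ib_query_gid(ibdev, port, 0, gid);
	}
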
@@ -272,14 +372,32 @@ out:
 static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
 				 struct ib_device_modify *props)
 {
+	struct mlx4_cmd_mailbox *mailbox;
+
 	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
 		return -EOPNOTSUPP;
 
-	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
-		spin_lock(&to_mdev(ibdev)->sm_lock);
-		memcpy(ibdev->node_desc, props->node_desc, 64);
-		spin_unlock(&to_mdev(ibdev)->sm_lock);
-	}
+	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
+		return 0;
+
+	spin_lock(&to_mdev(ibdev)->sm_lock);
+	memcpy(ibdev->node_desc, props->node_desc, 64);
+	spin_unlock(&to_mdev(ibdev)->sm_lock);
+
+	/*
+	 * If possible, pass node desc to FW, so it can generate
+	 * a 144 trap.  If cmd fails, just ignore.
+	 */
+	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
+	if (IS_ERR(mailbox))
+		return 0;
+
+	memset(mailbox->buf, 0, 256);
+	memcpy(mailbox->buf, props->node_desc, 64);
+	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
+		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+
+	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
 
 	return 0;
 }
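
The firmware notification added above follows the usual mlx4 command-mailbox round trip.
Condensed to its skeleton (same calls as in the hunk; mdev abbreviates to_mdev(ibdev), and
error handling is trimmed because the update is best-effort):

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);	/* DMA-safe buffer */
	if (IS_ERR(mailbox))
		return 0;				/* FW update is optional */
	memset(mailbox->buf, 0, 256);
	memcpy(mailbox->buf, props->node_desc, 64);	/* command payload */
	mlx4_cmd(mdev->dev, mailbox->dma, 1, 0,		/* in_mod 1, op_mod 0 */
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
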
@@ -289,6 +407,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 {
 	struct mlx4_cmd_mailbox *mailbox;
 	int err;
+	u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
 
 	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
 	if (IS_ERR(mailbox))
@@ -304,7 +423,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
 		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
 	}
 
-	err = mlx4_cmd(dev->dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+	err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
 		       MLX4_CMD_TIME_CLASS_B);
 
 	mlx4_free_cmd_mailbox(dev->dev, mailbox);
@@ -447,18 +566,132 @@ static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
 	return 0;
 }
 
+static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
+{
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_gid_entry *ge;
+
+	ge = kzalloc(sizeof *ge, GFP_KERNEL);
+	if (!ge)
+		return -ENOMEM;
+
+	ge->gid = *gid;
+	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
+		ge->port = mqp->port;
+		ge->added = 1;
+	}
+
+	mutex_lock(&mqp->mutex);
+	list_add_tail(&ge->list, &mqp->gid_list);
+	mutex_unlock(&mqp->mutex);
+
+	return 0;
+}
+
+int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
+		   union ib_gid *gid)
+{
+	u8 mac[6];
+	struct net_device *ndev;
+	int ret = 0;
+
+	if (!mqp->port)
+		return 0;
+
+	spin_lock(&mdev->iboe.lock);
+	ndev = mdev->iboe.netdevs[mqp->port - 1];
+	if (ndev)
+		dev_hold(ndev);
+	spin_unlock(&mdev->iboe.lock);
+
+	if (ndev) {
+		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+		rtnl_lock();
+		dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
+		ret = 1;
+		rtnl_unlock();
+		dev_put(ndev);
+	}
+
+	return ret;
+}
+
 static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	return mlx4_multicast_attach(to_mdev(ibqp->device)->dev,
-				     &to_mqp(ibqp)->mqp, gid->raw,
-				     !!(to_mqp(ibqp)->flags &
-					MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	int err;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+
+	err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
+				    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
+	if (err)
+		return err;
+
+	err = add_gid_entry(ibqp, gid);
+	if (err)
+		goto err_add;
+
+	return 0;
+
+err_add:
+	mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
+	return err;
+}
+
+static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
+{
+	struct mlx4_ib_gid_entry *ge;
+	struct mlx4_ib_gid_entry *tmp;
+	struct mlx4_ib_gid_entry *ret = NULL;
+
+	list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
+		if (!memcmp(raw, ge->gid.raw, 16)) {
+			ret = ge;
+			break;
+		}
+	}
+
+	return ret;
 }
 
 static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
 {
-	return mlx4_multicast_detach(to_mdev(ibqp->device)->dev,
-				     &to_mqp(ibqp)->mqp, gid->raw);
+	int err;
+	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
+	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
+	u8 mac[6];
+	struct net_device *ndev;
+	struct mlx4_ib_gid_entry *ge;
+
+	err = mlx4_multicast_detach(mdev->dev,
+				    &mqp->mqp, gid->raw);
+	if (err)
+		return err;
+
+	mutex_lock(&mqp->mutex);
+	ge = find_gid_entry(mqp, gid->raw);
+	if (ge) {
+		spin_lock(&mdev->iboe.lock);
+		ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
+		if (ndev)
+			dev_hold(ndev);
+		spin_unlock(&mdev->iboe.lock);
+		rdma_get_mcast_mac((struct in6_addr *)gid, mac);
+		if (ndev) {
+			rtnl_lock();
+			dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
+			rtnl_unlock();
+			dev_put(ndev);
+		}
+		list_del(&ge->list);
+		kfree(ge);
+	} else
+		printk(KERN_WARNING "could not find mgid entry\n");
+
+	mutex_unlock(&mqp->mutex);
+
+	return 0;
 }
 
 static int init_node_data(struct mlx4_ib_dev *dev)
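
Both multicast paths above mirror each attached GID into the underlying netdev's hardware
filter via dev_mc_add()/dev_mc_del(). The GID-to-MAC step, rdma_get_mcast_mac() from
<rdma/ib_addr.h>, uses the same mapping IPv6 uses for multicast over Ethernet; restated
standalone here for illustration:

	/* 33:33 is the Ethernet prefix reserved for IPv6 multicast; the
	 * low four bytes of the GID supply the rest of the address, so
	 * distinct groups map to distinct filter entries. */
	static void mcast_gid_to_mac(const union ib_gid *gid, u8 mac[6])
	{
		mac[0] = 0x33;
		mac[1] = 0x33;
		memcpy(mac + 2, gid->raw + 12, 4);
	}
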
@@ -543,15 +776,215 @@ static struct device_attribute *mlx4_class_attributes[] = {
 	&dev_attr_board_id
 };
 
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id, struct net_device *dev)
+{
+	memcpy(eui, dev->dev_addr, 3);
+	memcpy(eui + 5, dev->dev_addr + 3, 3);
+	if (vlan_id < 0x1000) {
+		eui[3] = vlan_id >> 8;
+		eui[4] = vlan_id & 0xff;
+	} else {
+		eui[3] = 0xff;
+		eui[4] = 0xfe;
+	}
+	eui[0] ^= 2;
+}
+
+static void update_gids_task(struct work_struct *work)
+{
+	struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
+	struct mlx4_cmd_mailbox *mailbox;
+	union ib_gid *gids;
+	int err;
+	struct mlx4_dev	*dev = gw->dev->dev;
+	struct ib_event event;
+
+	mailbox = mlx4_alloc_cmd_mailbox(dev);
+	if (IS_ERR(mailbox)) {
+		printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
+		return;
+	}
+
+	gids = mailbox->buf;
+	memcpy(gids, gw->gids, sizeof gw->gids);
+
+	err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+	if (err)
+		printk(KERN_WARNING "set port command failed\n");
+	else {
+		memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
+		event.device = &gw->dev->ib_dev;
+		event.element.port_num = gw->port;
+		event.event = IB_EVENT_LID_CHANGE;
+		ib_dispatch_event(&event);
+	}
+
+	mlx4_free_cmd_mailbox(dev, mailbox);
+	kfree(gw);
+}
+
+static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
+{
+	struct net_device *ndev = dev->iboe.netdevs[port - 1];
+	struct update_gid_work *work;
+	struct net_device *tmp;
+	int i;
+	u8 *hits;
+	int ret;
+	union ib_gid gid;
+	int free;
+	int found;
+	int need_update = 0;
+	u16 vid;
+
+	work = kzalloc(sizeof *work, GFP_ATOMIC);
+	if (!work)
+		return -ENOMEM;
+
+	hits = kzalloc(128, GFP_ATOMIC);
+	if (!hits) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	read_lock(&dev_base_lock);
+	for_each_netdev(&init_net, tmp) {
+		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
+			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+			vid = rdma_vlan_dev_vlan_id(tmp);
+			mlx4_addrconf_ifid_eui48(&gid.raw[8], vid, ndev);
+			found = 0;
+			free = -1;
+			for (i = 0; i < 128; ++i) {
+				if (free < 0 &&
+				    !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+					free = i;
+				if (!memcmp(&dev->iboe.gid_table[port - 1][i], &gid, sizeof gid)) {
+					hits[i] = 1;
+					found = 1;
+					break;
+				}
+			}
+
+			if (!found) {
+				if (tmp == ndev &&
+				    (memcmp(&dev->iboe.gid_table[port - 1][0],
+					    &gid, sizeof gid) ||
+				     !memcmp(&dev->iboe.gid_table[port - 1][0],
+					     &zgid, sizeof gid))) {
+					dev->iboe.gid_table[port - 1][0] = gid;
+					++need_update;
+					hits[0] = 1;
+				} else if (free >= 0) {
+					dev->iboe.gid_table[port - 1][free] = gid;
+					hits[free] = 1;
+					++need_update;
+				}
+			}
+		}
+	}
+	read_unlock(&dev_base_lock);
+
+	for (i = 0; i < 128; ++i)
+		if (!hits[i]) {
+			if (memcmp(&dev->iboe.gid_table[port - 1][i], &zgid, sizeof zgid))
+				++need_update;
+			dev->iboe.gid_table[port - 1][i] = zgid;
+		}
+
+	if (need_update) {
+		memcpy(work->gids, dev->iboe.gid_table[port - 1], sizeof work->gids);
+		INIT_WORK(&work->work, update_gids_task);
+		work->port = port;
+		work->dev = dev;
+		queue_work(wq, &work->work);
+	} else
+		kfree(work);
+
+	kfree(hits);
+	return 0;
+
+out:
+	kfree(work);
+	return ret;
+}
+
+static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
+{
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_CHANGEADDR:
+		update_ipv6_gids(dev, port, 0);
+		break;
+
+	case NETDEV_DOWN:
+		update_ipv6_gids(dev, port, 1);
+		dev->iboe.netdevs[port - 1] = NULL;
+	}
+}
+
+static void netdev_added(struct mlx4_ib_dev *dev, int port)
+{
+	update_ipv6_gids(dev, port, 0);
+}
+
+static void netdev_removed(struct mlx4_ib_dev *dev, int port)
+{
+	update_ipv6_gids(dev, port, 1);
+}
+
+static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
+				void *ptr)
+{
+	struct net_device *dev = ptr;
+	struct mlx4_ib_dev *ibdev;
+	struct net_device *oldnd;
+	struct mlx4_ib_iboe *iboe;
+	int port;
+
+	if (!net_eq(dev_net(dev), &init_net))
+		return NOTIFY_DONE;
+
+	ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
+	iboe = &ibdev->iboe;
+
+	spin_lock(&iboe->lock);
+	mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+		oldnd = iboe->netdevs[port - 1];
+		iboe->netdevs[port - 1] =
+			mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
+		if (oldnd != iboe->netdevs[port - 1]) {
+			if (iboe->netdevs[port - 1])
+				netdev_added(ibdev, port);
+			else
+				netdev_removed(ibdev, port);
+		}
+	}
+
+	if (dev == iboe->netdevs[0] ||
+	    (iboe->netdevs[0] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[0]))
+		handle_en_event(ibdev, 1, event);
+	else if (dev == iboe->netdevs[1]
+		 || (iboe->netdevs[1] && rdma_vlan_dev_real_dev(dev) == iboe->netdevs[1]))
+		handle_en_event(ibdev, 2, event);
+
+	spin_unlock(&iboe->lock);
+
+	return NOTIFY_DONE;
+}
+
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
 	struct mlx4_ib_dev *ibdev;
 	int num_ports = 0;
 	int i;
+	int err;
+	struct mlx4_ib_iboe *iboe;
 
 	printk_once(KERN_INFO "%s", mlx4_ib_version);
 
-	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+	mlx4_foreach_ib_transport_port(i, dev)
 		num_ports++;
 
 	/* No point in registering a device with no ports... */
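
mlx4_addrconf_ifid_eui48() in the hunk above derives the low eight bytes of a link-local
GID from the netdev's MAC in modified-EUI-64 fashion, except that a tagged interface
embeds its VLAN ID where plain EUI-64 would put the ff:fe filler. A user-space restatement
with a worked, purely illustrative example (the MAC is made up):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	/* Same logic as mlx4_addrconf_ifid_eui48(), kernel types swapped
	 * for stdint so it can be compiled and poked at directly. */
	static void mac_to_ifid(const uint8_t mac[6], uint16_t vlan_id, uint8_t eui[8])
	{
		memcpy(eui, mac, 3);		/* OUI half of the MAC */
		memcpy(eui + 5, mac + 3, 3);	/* NIC half of the MAC */
		if (vlan_id < 0x1000) {		/* tagged: embed 12-bit VLAN ID */
			eui[3] = vlan_id >> 8;
			eui[4] = vlan_id & 0xff;
		} else {			/* untagged: standard ff:fe filler */
			eui[3] = 0xff;
			eui[4] = 0xfe;
		}
		eui[0] ^= 2;			/* flip the EUI-64 universal/local bit */
	}

	int main(void)
	{
		uint8_t mac[6] = { 0x00, 0x02, 0xc9, 0x01, 0x02, 0x03 };
		uint8_t eui[8];
		int i;

		mac_to_ifid(mac, 0xffff, eui);	/* rdma_vlan_dev_vlan_id() returns
						 * 0xffff for a non-VLAN netdev */
		for (i = 0; i < 8; i++)
			printf("%02x%c", eui[i], i < 7 ? ':' : '\n');
		/* prints 02:02:c9:ff:fe:01:02:03; with VLAN 5 it would be
		 * 02:02:c9:00:05:01:02:03 */
		return 0;
	}
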
@@ -564,6 +997,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 		return NULL;
 	}
 
+	iboe = &ibdev->iboe;
+
 	if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
 		goto err_dealloc;
 
@@ -612,6 +1047,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 
 	ibdev->ib_dev.query_device	= mlx4_ib_query_device;
 	ibdev->ib_dev.query_port	= mlx4_ib_query_port;
+	ibdev->ib_dev.get_link_layer	= mlx4_ib_port_link_layer;
 	ibdev->ib_dev.query_gid		= mlx4_ib_query_gid;
 	ibdev->ib_dev.query_pkey	= mlx4_ib_query_pkey;
 	ibdev->ib_dev.modify_device	= mlx4_ib_modify_device;
@@ -656,6 +1092,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	ibdev->ib_dev.unmap_fmr		= mlx4_ib_unmap_fmr;
 	ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;
 
+	spin_lock_init(&iboe->lock);
+
 	if (init_node_data(ibdev))
 		goto err_map;
 
@@ -668,16 +1106,28 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
 	if (mlx4_ib_mad_init(ibdev))
 		goto err_reg;
 
+	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
+		iboe->nb.notifier_call = mlx4_ib_netdev_event;
+		err = register_netdevice_notifier(&iboe->nb);
+		if (err)
+			goto err_reg;
+	}
+
 	for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
 		if (device_create_file(&ibdev->ib_dev.dev,
 				       mlx4_class_attributes[i]))
-			goto err_reg;
+			goto err_notif;
 	}
 
 	ibdev->ib_active = true;
 
 	return ibdev;
 
+err_notif:
+	if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+		printk(KERN_WARNING "failure unregistering notifier\n");
+	flush_workqueue(wq);
+
 err_reg:
 	ib_unregister_device(&ibdev->ib_dev);
 
@@ -703,11 +1153,16 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
 
 	mlx4_ib_mad_cleanup(ibdev);
 	ib_unregister_device(&ibdev->ib_dev);
+	if (ibdev->iboe.nb.notifier_call) {
+		if (unregister_netdevice_notifier(&ibdev->iboe.nb))
+			printk(KERN_WARNING "failure unregistering notifier\n");
+		ibdev->iboe.nb.notifier_call = NULL;
+	}
+	iounmap(ibdev->uar_map);
 
-	for (p = 1; p <= ibdev->num_ports; ++p)
+	mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
 		mlx4_CLOSE_PORT(dev, p);
 
-	iounmap(ibdev->uar_map);
 	mlx4_uar_free(dev, &ibdev->priv_uar);
 	mlx4_pd_free(dev, ibdev->priv_pdn);
 	ib_dealloc_device(&ibdev->ib_dev);
@@ -747,19 +1202,33 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
 }
 
 static struct mlx4_interface mlx4_ib_interface = {
-	.add	= mlx4_ib_add,
-	.remove	= mlx4_ib_remove,
-	.event	= mlx4_ib_event
+	.add		= mlx4_ib_add,
+	.remove		= mlx4_ib_remove,
+	.event		= mlx4_ib_event,
+	.protocol	= MLX4_PROTOCOL_IB
 };
 
 static int __init mlx4_ib_init(void)
 {
-	return mlx4_register_interface(&mlx4_ib_interface);
+	int err;
+
+	wq = create_singlethread_workqueue("mlx4_ib");
+	if (!wq)
+		return -ENOMEM;
+
+	err = mlx4_register_interface(&mlx4_ib_interface);
+	if (err) {
+		destroy_workqueue(wq);
+		return err;
+	}
+
+	return 0;
 }
 
 static void __exit mlx4_ib_cleanup(void)
 {
 	mlx4_unregister_interface(&mlx4_ib_interface);
+	destroy_workqueue(wq);
 }
 
 module_init(mlx4_ib_init);