diff options
author | Or Gerlitz <ogerlitz@voltaire.com> | 2008-07-22 17:14:22 -0400 |
---|---|---|
committer | Roland Dreier <rolandd@cisco.com> | 2008-07-22 17:14:22 -0400 |
commit | dd5bdff83b19d9174126e0398b47117c3a80e22d (patch) | |
tree | 2bdbc102afcc851d1cd0bef07e3a47d4e9ec1ae9 /drivers/infiniband | |
parent | d35cb360c29956510b2fe1a953bd4968536f7216 (diff) |
RDMA/cma: Add RDMA_CM_EVENT_ADDR_CHANGE event
Add an RDMA_CM_EVENT_ADDR_CHANGE event that can be used by rdma-cm
consumers that wish to have their RDMA sessions always use the same
links (eg <hca/port>) as the IP stack does. In the current code, this
does not happen when bonding is used and fail-over happened but the IB
link used by an already existing session is operating fine.
Use the netevent notification for sensing that a change has happened
in the IP stack, then scan the rdma-cm ID list to see if there is an
ID that is "misaligned" with respect to the IP stack, and deliver
RDMA_CM_EVENT_ADDR_CHANGE for this ID. The consumer can act on the
event or just ignore it.
Signed-off-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r-- | drivers/infiniband/core/cma.c | 92 |
1 files changed, 92 insertions, 0 deletions
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index ae11d5cc74d0..79792c92e6fb 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -168,6 +168,12 @@ struct cma_work { | |||
168 | struct rdma_cm_event event; | 168 | struct rdma_cm_event event; |
169 | }; | 169 | }; |
170 | 170 | ||
/*
 * Deferred work item used to deliver an RDMA_CM_EVENT_ADDR_CHANGE event
 * to an rdma-cm ID from process context (queued by cma_netdev_change(),
 * run by cma_ndev_work_handler()).
 */
struct cma_ndev_work {
	struct work_struct	work;
	/* ID the event targets; a reference is held while queued and is
	 * dropped by cma_ndev_work_handler(). */
	struct rdma_id_private	*id;
	/* Event to deliver (event field set to RDMA_CM_EVENT_ADDR_CHANGE). */
	struct rdma_cm_event	event;
};

171 | union cma_ip_addr { | 177 | union cma_ip_addr { |
172 | struct in6_addr ip6; | 178 | struct in6_addr ip6; |
173 | struct { | 179 | struct { |
@@ -1598,6 +1604,30 @@ out: | |||
1598 | kfree(work); | 1604 | kfree(work); |
1599 | } | 1605 | } |
1600 | 1606 | ||
/*
 * Work handler that delivers a deferred address-change event (queued by
 * cma_netdev_change()) to the consumer's event handler.
 */
static void cma_ndev_work_handler(struct work_struct *_work)
{
	struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
	struct rdma_id_private *id_priv = work->id;
	int destroy = 0;

	mutex_lock(&id_priv->handler_mutex);
	/* Skip delivery if the ID is already being torn down. */
	if (id_priv->state == CMA_DESTROYING ||
	    id_priv->state == CMA_DEVICE_REMOVAL)
		goto out;

	/* A non-zero return from the consumer requests destruction of the ID. */
	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
		cma_exch(id_priv, CMA_DESTROYING);
		destroy = 1;
	}

out:
	mutex_unlock(&id_priv->handler_mutex);
	/* Drop the reference taken when the work was queued.  The destroy
	 * happens only after handler_mutex is released. */
	cma_deref_id(id_priv);
	if (destroy)
		rdma_destroy_id(&id_priv->id);
	kfree(work);
}

1630 | |||
1601 | static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) | 1631 | static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) |
1602 | { | 1632 | { |
1603 | struct rdma_route *route = &id_priv->id.route; | 1633 | struct rdma_route *route = &id_priv->id.route; |
@@ -2723,6 +2753,65 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) | |||
2723 | } | 2753 | } |
2724 | EXPORT_SYMBOL(rdma_leave_multicast); | 2754 | EXPORT_SYMBOL(rdma_leave_multicast); |
2725 | 2755 | ||
2756 | static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) | ||
2757 | { | ||
2758 | struct rdma_dev_addr *dev_addr; | ||
2759 | struct cma_ndev_work *work; | ||
2760 | |||
2761 | dev_addr = &id_priv->id.route.addr.dev_addr; | ||
2762 | |||
2763 | if ((dev_addr->src_dev == ndev) && | ||
2764 | memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { | ||
2765 | printk(KERN_INFO "RDMA CM addr change for ndev %s used by id %p\n", | ||
2766 | ndev->name, &id_priv->id); | ||
2767 | work = kzalloc(sizeof *work, GFP_KERNEL); | ||
2768 | if (!work) | ||
2769 | return -ENOMEM; | ||
2770 | |||
2771 | INIT_WORK(&work->work, cma_ndev_work_handler); | ||
2772 | work->id = id_priv; | ||
2773 | work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; | ||
2774 | atomic_inc(&id_priv->refcount); | ||
2775 | queue_work(cma_wq, &work->work); | ||
2776 | } | ||
2777 | |||
2778 | return 0; | ||
2779 | } | ||
2780 | |||
2781 | static int cma_netdev_callback(struct notifier_block *self, unsigned long event, | ||
2782 | void *ctx) | ||
2783 | { | ||
2784 | struct net_device *ndev = (struct net_device *)ctx; | ||
2785 | struct cma_device *cma_dev; | ||
2786 | struct rdma_id_private *id_priv; | ||
2787 | int ret = NOTIFY_DONE; | ||
2788 | |||
2789 | if (dev_net(ndev) != &init_net) | ||
2790 | return NOTIFY_DONE; | ||
2791 | |||
2792 | if (event != NETDEV_BONDING_FAILOVER) | ||
2793 | return NOTIFY_DONE; | ||
2794 | |||
2795 | if (!(ndev->flags & IFF_MASTER) || !(ndev->priv_flags & IFF_BONDING)) | ||
2796 | return NOTIFY_DONE; | ||
2797 | |||
2798 | mutex_lock(&lock); | ||
2799 | list_for_each_entry(cma_dev, &dev_list, list) | ||
2800 | list_for_each_entry(id_priv, &cma_dev->id_list, list) { | ||
2801 | ret = cma_netdev_change(ndev, id_priv); | ||
2802 | if (ret) | ||
2803 | goto out; | ||
2804 | } | ||
2805 | |||
2806 | out: | ||
2807 | mutex_unlock(&lock); | ||
2808 | return ret; | ||
2809 | } | ||
2810 | |||
/* Watches netdevice events to detect bonding fail-over (see
 * cma_netdev_callback()); registered in cma_init(). */
static struct notifier_block cma_nb = {
	.notifier_call = cma_netdev_callback
};

2814 | |||
2726 | static void cma_add_one(struct ib_device *device) | 2815 | static void cma_add_one(struct ib_device *device) |
2727 | { | 2816 | { |
2728 | struct cma_device *cma_dev; | 2817 | struct cma_device *cma_dev; |
@@ -2831,6 +2920,7 @@ static int cma_init(void) | |||
2831 | 2920 | ||
2832 | ib_sa_register_client(&sa_client); | 2921 | ib_sa_register_client(&sa_client); |
2833 | rdma_addr_register_client(&addr_client); | 2922 | rdma_addr_register_client(&addr_client); |
2923 | register_netdevice_notifier(&cma_nb); | ||
2834 | 2924 | ||
2835 | ret = ib_register_client(&cma_client); | 2925 | ret = ib_register_client(&cma_client); |
2836 | if (ret) | 2926 | if (ret) |
@@ -2838,6 +2928,7 @@ static int cma_init(void) | |||
2838 | return 0; | 2928 | return 0; |
2839 | 2929 | ||
2840 | err: | 2930 | err: |
2931 | unregister_netdevice_notifier(&cma_nb); | ||
2841 | rdma_addr_unregister_client(&addr_client); | 2932 | rdma_addr_unregister_client(&addr_client); |
2842 | ib_sa_unregister_client(&sa_client); | 2933 | ib_sa_unregister_client(&sa_client); |
2843 | destroy_workqueue(cma_wq); | 2934 | destroy_workqueue(cma_wq); |
@@ -2847,6 +2938,7 @@ err: | |||
2847 | static void cma_cleanup(void) | 2938 | static void cma_cleanup(void) |
2848 | { | 2939 | { |
2849 | ib_unregister_client(&cma_client); | 2940 | ib_unregister_client(&cma_client); |
2941 | unregister_netdevice_notifier(&cma_nb); | ||
2850 | rdma_addr_unregister_client(&addr_client); | 2942 | rdma_addr_unregister_client(&addr_client); |
2851 | ib_sa_unregister_client(&sa_client); | 2943 | ib_sa_unregister_client(&sa_client); |
2852 | destroy_workqueue(cma_wq); | 2944 | destroy_workqueue(cma_wq); |