author     Jiri Pirko <jpirko@redhat.com>           2009-07-29 21:06:12 -0400
committer  David S. Miller <davem@davemloft.net>    2009-08-02 15:20:46 -0400
commit     a6ac65db2329e7685299666f5f7b6093c7b0f3a0
tree       02ce50a2a4321cb2872a26196fba6b4d4d41a2ba /net/core
parent     50c643e7652458e649955408685a16e88ea6dbae
net: restore the original spinlock to protect unicast list
There is a call path on which the assertion in dev_unicast_sync() triggers:
igmp6_group_added -> dev_mc_add -> __dev_set_rx_mode ->
-> vlan_dev_set_rx_mode -> dev_unicast_sync
Therefore we cannot protect this list with rtnl. This patch restores the
original scheme of protecting the unicast list with a spinlock
(netif_addr_lock_bh()).
Signed-off-by: Jiri Pirko <jpirko@redhat.com>
Tested-by: Meelis Roos <mroos@linux.ee>
Signed-off-by: David S. Miller <davem@davemloft.net>
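For context, a minimal sketch (not part of this patch) of the kind of caller the quoted chain reaches: a stacked device whose ndo_set_rx_mode callback, similar in spirit to vlan_dev_set_rx_mode(), forwards its address lists to the underlying device. __dev_set_rx_mode() invokes that callback with the upper device's address list already locked, possibly from BH context (the igmp6 path above), which is why dev_unicast_sync() cannot assert rtnl there. The example_* names and the private-data layout are hypothetical.

#include <linux/netdevice.h>

/* Hypothetical stacked device private data; the VLAN code keeps an
 * equivalent pointer to the underlying (real) device. */
struct example_priv {
	struct net_device *lower_dev;	/* real device underneath */
};

/* ndo_set_rx_mode: reached via __dev_set_rx_mode() with the upper device's
 * addr_list_lock held, potentially in softirq context.  After this patch,
 * dev_unicast_sync() serializes on the lower device's address-list spinlock
 * itself instead of asserting that the rtnl mutex is held. */
static void example_set_rx_mode(struct net_device *upper)
{
	struct example_priv *priv = netdev_priv(upper);

	dev_mc_sync(priv->lower_dev, upper);
	dev_unicast_sync(priv->lower_dev, upper);
}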
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c | 25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 70c27e0c7c32..43e61ba7bd95 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3865,10 +3865,12 @@ int dev_unicast_delete(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_delete);
@@ -3889,10 +3891,12 @@ int dev_unicast_add(struct net_device *dev, void *addr)
 
 	ASSERT_RTNL();
 
+	netif_addr_lock_bh(dev);
 	err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
 			    NETDEV_HW_ADDR_T_UNICAST);
 	if (!err)
 		__dev_set_rx_mode(dev);
+	netif_addr_unlock_bh(dev);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_add);
@@ -3949,7 +3953,8 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
  * @from: source device
  *
  * Add newly added addresses to the destination device and release
- * addresses that have no users left.
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
  *
  * This function is intended to be called from the dev->set_rx_mode
  * function of layered software devices.
@@ -3958,14 +3963,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
 {
 	int err = 0;
 
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return -EINVAL;
 
+	netif_addr_lock_bh(to);
 	err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
 	if (!err)
 		__dev_set_rx_mode(to);
+	netif_addr_unlock_bh(to);
 	return err;
 }
 EXPORT_SYMBOL(dev_unicast_sync);
@@ -3981,28 +3986,30 @@ EXPORT_SYMBOL(dev_unicast_sync);
  */
 void dev_unicast_unsync(struct net_device *to, struct net_device *from)
 {
-	ASSERT_RTNL();
-
 	if (to->addr_len != from->addr_len)
 		return;
 
+	netif_addr_lock_bh(from);
+	netif_addr_lock(to);
 	__hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
 	__dev_set_rx_mode(to);
+	netif_addr_unlock(to);
+	netif_addr_unlock_bh(from);
 }
 EXPORT_SYMBOL(dev_unicast_unsync);
 
 static void dev_unicast_flush(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
+	netif_addr_lock_bh(dev);
 	__hw_addr_flush(&dev->uc);
+	netif_addr_unlock_bh(dev);
 }
 
 static void dev_unicast_init(struct net_device *dev)
 {
-	/* rtnl_mutex must be held here */
-
+	netif_addr_lock_bh(dev);
 	__hw_addr_init(&dev->uc);
+	netif_addr_unlock_bh(dev);
 }
 
 
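As a usage note, a hedged sketch (not from the patch) of the write side: adding and then removing a secondary unicast address from a control path. dev_unicast_add() and dev_unicast_delete() keep their ASSERT_RTNL(), so callers hold the rtnl mutex as before; what changes is that the list is now also guarded by the address-list spinlock via netif_addr_lock_bh(), so readers such as dev_unicast_sync() on the set_rx_mode path no longer depend on rtnl. The function name and the address value are made up for illustration.

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_ether.h>

/* Add, then remove, a locally administered secondary unicast address. */
static int example_toggle_secondary_addr(struct net_device *dev)
{
	static const unsigned char addr[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};
	int err;

	rtnl_lock();
	/* Both helpers now take netif_addr_lock_bh() internally. */
	err = dev_unicast_add(dev, (void *)addr);
	if (!err)
		err = dev_unicast_delete(dev, (void *)addr);
	rtnl_unlock();
	return err;
}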