author     Chris Leech <christopher.leech@intel.com>    2008-01-31 19:53:23 -0500
committer  David S. Miller <davem@davemloft.net>        2008-01-31 22:28:24 -0500
commit     e83a2ea850bf0c0c81c675444080970fc07798c6 (patch)
tree       ebdf251be6fa2f9b2b482cd0e6393fdbfc8278a0 /net/core
parent     16ca3f913001efdb6171a2781ef41c77474e3895 (diff)
[VLAN]: set_rx_mode support for unicast address list
Reuse the existing logic for multicast list synchronization for the
unicast address list. The core of dev_mc_sync/unsync is split out as
__dev_addr_sync/unsync and moved from dev_mcast.c to dev.c; these are
then used to implement dev_unicast_sync/unsync as well.
I'm working on cleaning up Intel's FCoE stack, which generates new MAC
addresses from the Fibre Channel device ID assigned by the fabric, as
per the current draft specification in T11. When using such a
protocol in a VLAN environment, it would be nice not to be forced
into promiscuous mode, assuming the underlying Ethernet driver
supports multiple unicast addresses as well.
Signed-off-by: Chris Leech <christopher.leech@intel.com>
Signed-off-by: Patrick McHardy <kaber@trash.net>
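
[Editor's sketch] For illustration, here is roughly how a layered driver would plug the new helpers into its rx_mode and stop handlers. This is a hedged sketch, not code from this patch: the example_vlan_* names and the real_dev field are hypothetical stand-ins for whatever state the stacked driver actually keeps.

#include <linux/netdevice.h>

/* Hypothetical per-device state for a stacked (VLAN-style) driver;
 * only the pointer to the underlying real device matters here. */
struct example_vlan_priv {
        struct net_device *real_dev;
};

/* Hooked up as dev->set_rx_mode.  The core invokes this with the upper
 * device already locked by netif_tx_lock_bh, which is exactly the
 * locking dev_unicast_sync()/dev_mc_sync() expect of their source. */
static void example_vlan_set_rx_mode(struct net_device *dev)
{
        struct example_vlan_priv *p = netdev_priv(dev);

        /* A real driver would check the return values and fall back to
         * promiscuous mode if the lower device ran out of filter slots. */
        dev_unicast_sync(p->real_dev, dev);
        dev_mc_sync(p->real_dev, dev);
}

/* Hooked up as dev->stop: drop every address this device pushed down
 * to the real device while it was up. */
static int example_vlan_stop(struct net_device *dev)
{
        struct example_vlan_priv *p = netdev_priv(dev);

        dev_unicast_unsync(p->real_dev, dev);
        dev_mc_unsync(p->real_dev, dev);
        return 0;
}

Because dev_unicast_unsync() takes both devices' TX locks itself, the stop handler needs no locking of its own.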
Diffstat (limited to 'net/core')
-rw-r--r--   net/core/dev.c        |  96
-rw-r--r--   net/core/dev_mcast.c  |  39
2 files changed, 101 insertions, 34 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index c9c593e1ba6f..edaff2720e10 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2962,6 +2962,102 @@ int dev_unicast_add(struct net_device *dev, void *addr, int alen)
 }
 EXPORT_SYMBOL(dev_unicast_add);
 
+int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
+                    struct dev_addr_list **from, int *from_count)
+{
+        struct dev_addr_list *da, *next;
+        int err = 0;
+
+        da = *from;
+        while (da != NULL) {
+                next = da->next;
+                if (!da->da_synced) {
+                        err = __dev_addr_add(to, to_count,
+                                             da->da_addr, da->da_addrlen, 0);
+                        if (err < 0)
+                                break;
+                        da->da_synced = 1;
+                        da->da_users++;
+                } else if (da->da_users == 1) {
+                        __dev_addr_delete(to, to_count,
+                                          da->da_addr, da->da_addrlen, 0);
+                        __dev_addr_delete(from, from_count,
+                                          da->da_addr, da->da_addrlen, 0);
+                }
+                da = next;
+        }
+        return err;
+}
+
+void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
+                       struct dev_addr_list **from, int *from_count)
+{
+        struct dev_addr_list *da, *next;
+
+        da = *from;
+        while (da != NULL) {
+                next = da->next;
+                if (da->da_synced) {
+                        __dev_addr_delete(to, to_count,
+                                          da->da_addr, da->da_addrlen, 0);
+                        da->da_synced = 0;
+                        __dev_addr_delete(from, from_count,
+                                          da->da_addr, da->da_addrlen, 0);
+                }
+                da = next;
+        }
+}
+
+/**
+ *      dev_unicast_sync - Synchronize device's unicast list to another device
+ *      @to: destination device
+ *      @from: source device
+ *
+ *      Add newly added addresses to the destination device and release
+ *      addresses that have no users left. The source device must be
+ *      locked by netif_tx_lock_bh.
+ *
+ *      This function is intended to be called from the dev->set_rx_mode
+ *      function of layered software devices.
+ */
+int dev_unicast_sync(struct net_device *to, struct net_device *from)
+{
+        int err = 0;
+
+        netif_tx_lock_bh(to);
+        err = __dev_addr_sync(&to->uc_list, &to->uc_count,
+                              &from->uc_list, &from->uc_count);
+        if (!err)
+                __dev_set_rx_mode(to);
+        netif_tx_unlock_bh(to);
+        return err;
+}
+EXPORT_SYMBOL(dev_unicast_sync);
+
+/**
+ *      dev_unicast_unsync - Remove synchronized addresses from the destination
+ *      device
+ *      @to: destination device
+ *      @from: source device
+ *
+ *      Remove all addresses that were added to the destination device by
+ *      dev_unicast_sync(). This function is intended to be called from the
+ *      dev->stop function of layered software devices.
+ */
+void dev_unicast_unsync(struct net_device *to, struct net_device *from)
+{
+        netif_tx_lock_bh(from);
+        netif_tx_lock_bh(to);
+
+        __dev_addr_unsync(&to->uc_list, &to->uc_count,
+                          &from->uc_list, &from->uc_count);
+        __dev_set_rx_mode(to);
+
+        netif_tx_unlock_bh(to);
+        netif_tx_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_unicast_unsync);
+
 static void __dev_addr_discard(struct dev_addr_list **list)
 {
         struct dev_addr_list *tmp;
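
[Editor's sketch] The pair of walks above implement a small reference-count protocol: the first sync of an address copies it to the destination list and pins the source entry with an extra da_users reference; when a later pass finds da_users == 1, the sync reference is the only holder left, meaning the original user deleted the address, so it is purged from both lists. The minimal userspace mock below (hypothetical addr type and helpers, deliberately simpler than the kernel's __dev_addr_add/__dev_addr_delete) makes that lifecycle easy to trace.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for struct dev_addr_list. */
struct addr {
        struct addr *next;
        char val[16];
        int users;   /* like da_users  */
        int synced;  /* like da_synced */
};

/* Mimics __dev_addr_add: a fresh entry starts with one user. */
static void addr_add(struct addr **list, int *count, const char *val)
{
        struct addr *a;

        for (a = *list; a; a = a->next)
                if (!strcmp(a->val, val)) {
                        a->users++;
                        return;
                }
        a = calloc(1, sizeof(*a));
        if (!a)
                exit(1);
        snprintf(a->val, sizeof(a->val), "%s", val);
        a->users = 1;
        a->next = *list;
        *list = a;
        (*count)++;
}

/* Mimics __dev_addr_delete: drop one reference, free on the last. */
static void addr_del(struct addr **list, int *count, const char *val)
{
        struct addr **p;

        for (p = list; *p; p = &(*p)->next)
                if (!strcmp((*p)->val, val)) {
                        struct addr *a = *p;

                        if (--a->users == 0) {
                                *p = a->next;
                                free(a);
                                (*count)--;
                        }
                        return;
                }
}

/* The same walk as __dev_addr_sync in the diff above. */
static void addr_sync(struct addr **to, int *tc, struct addr **from, int *fc)
{
        struct addr *a = *from, *next;

        while (a) {
                next = a->next;
                if (!a->synced) {
                        addr_add(to, tc, a->val); /* copy to destination  */
                        a->synced = 1;
                        a->users++;               /* pin the source entry */
                } else if (a->users == 1) {
                        /* Only the sync reference is left: the original
                         * user already deleted this address, so purge it
                         * from both lists. */
                        addr_del(to, tc, a->val);
                        addr_del(from, fc, a->val);
                }
                a = next;
        }
}

int main(void)
{
        struct addr *upper = NULL, *lower = NULL;
        int uc = 0, lc = 0;

        addr_add(&upper, &uc, "aa:bb");      /* user adds an address     */
        addr_sync(&lower, &lc, &upper, &uc); /* pushed to the lower list */
        printf("after sync:   %d lower entries\n", lc); /* prints 1 */

        addr_del(&upper, &uc, "aa:bb");      /* user removes it again    */
        addr_sync(&lower, &lc, &upper, &uc); /* sync garbage-collects it */
        printf("after resync: %d lower entries\n", lc); /* prints 0 */
        return 0;
}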
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index cadbfbf7e7f5..cec582563e0d 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -113,32 +113,15 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
  *      locked by netif_tx_lock_bh.
  *
  *      This function is intended to be called from the dev->set_multicast_list
- *      function of layered software devices.
+ *      or dev->set_rx_mode function of layered software devices.
  */
 int dev_mc_sync(struct net_device *to, struct net_device *from)
 {
-        struct dev_addr_list *da, *next;
         int err = 0;
 
         netif_tx_lock_bh(to);
-        da = from->mc_list;
-        while (da != NULL) {
-                next = da->next;
-                if (!da->da_synced) {
-                        err = __dev_addr_add(&to->mc_list, &to->mc_count,
-                                             da->da_addr, da->da_addrlen, 0);
-                        if (err < 0)
-                                break;
-                        da->da_synced = 1;
-                        da->da_users++;
-                } else if (da->da_users == 1) {
-                        __dev_addr_delete(&to->mc_list, &to->mc_count,
-                                          da->da_addr, da->da_addrlen, 0);
-                        __dev_addr_delete(&from->mc_list, &from->mc_count,
-                                          da->da_addr, da->da_addrlen, 0);
-                }
-                da = next;
-        }
+        err = __dev_addr_sync(&to->mc_list, &to->mc_count,
+                              &from->mc_list, &from->mc_count);
         if (!err)
                 __dev_set_rx_mode(to);
         netif_tx_unlock_bh(to);
@@ -160,23 +143,11 @@ EXPORT_SYMBOL(dev_mc_sync);
  */
 void dev_mc_unsync(struct net_device *to, struct net_device *from)
 {
-        struct dev_addr_list *da, *next;
-
         netif_tx_lock_bh(from);
         netif_tx_lock_bh(to);
 
-        da = from->mc_list;
-        while (da != NULL) {
-                next = da->next;
-                if (da->da_synced) {
-                        __dev_addr_delete(&to->mc_list, &to->mc_count,
-                                          da->da_addr, da->da_addrlen, 0);
-                        da->da_synced = 0;
-                        __dev_addr_delete(&from->mc_list, &from->mc_count,
-                                          da->da_addr, da->da_addrlen, 0);
-                }
-                da = next;
-        }
+        __dev_addr_unsync(&to->mc_list, &to->mc_count,
+                          &from->mc_list, &from->mc_count);
         __dev_set_rx_mode(to);
 
         netif_tx_unlock_bh(to);