Diffstat (limited to 'net/bridge')
-rw-r--r--  net/bridge/Kconfig                     |    1
-rw-r--r--  net/bridge/br.c                        |    5
-rw-r--r--  net/bridge/br_device.c                 |  120
-rw-r--r--  net/bridge/br_fdb.c                    |  330
-rw-r--r--  net/bridge/br_forward.c                |   20
-rw-r--r--  net/bridge/br_if.c                     |  160
-rw-r--r--  net/bridge/br_input.c                  |   48
-rw-r--r--  net/bridge/br_ioctl.c                  |   42
-rw-r--r--  net/bridge/br_multicast.c              |  183
-rw-r--r--  net/bridge/br_netfilter.c              |  202
-rw-r--r--  net/bridge/br_netlink.c                |   68
-rw-r--r--  net/bridge/br_notify.c                 |   17
-rw-r--r--  net/bridge/br_private.h                |   50
-rw-r--r--  net/bridge/br_private_stp.h            |   13
-rw-r--r--  net/bridge/br_stp.c                    |   87
-rw-r--r--  net/bridge/br_stp_bpdu.c               |   10
-rw-r--r--  net/bridge/br_stp_if.c                 |   34
-rw-r--r--  net/bridge/br_stp_timer.c              |    1
-rw-r--r--  net/bridge/br_sysfs_br.c               |   39
-rw-r--r--  net/bridge/br_sysfs_if.c               |   26
-rw-r--r--  net/bridge/netfilter/ebt_ip6.c         |   46
-rw-r--r--  net/bridge/netfilter/ebt_vlan.c        |   25
-rw-r--r--  net/bridge/netfilter/ebtable_broute.c  |    3
-rw-r--r--  net/bridge/netfilter/ebtables.c        |   97
24 files changed, 1030 insertions, 597 deletions
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
 	tristate "802.1d Ethernet Bridging"
 	select LLC
 	select STP
+	depends on IPV6 || IPV6=n
 	---help---
 	  If you say Y here, then your Linux box will be able to act as an
 	  Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br.c b/net/bridge/br.c
index c8436fa31344..f20c4fd915a8 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,8 +22,6 @@
 
 #include "br_private.h"
 
-int (*br_should_route_hook)(struct sk_buff *skb);
-
 static const struct stp_proto br_stp_proto = {
 	.rcv	= br_stp_rcv,
 };
@@ -102,9 +100,8 @@ static void __exit br_deinit(void)
 	br_fdb_fini();
 }
 
-EXPORT_SYMBOL(br_should_route_hook);
-
 module_init(br_init)
 module_exit(br_deinit)
 MODULE_LICENSE("GPL");
 MODULE_VERSION(BR_VERSION);
+MODULE_ALIAS_RTNL_LINK("bridge");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index cf09fe591fc2..32b8f9f7f79e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -49,7 +49,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_pull(skb, ETH_HLEN);
 
 	rcu_read_lock();
-	if (is_multicast_ether_addr(dest)) {
+	if (is_broadcast_ether_addr(dest))
+		br_flood_deliver(br, skb);
+	else if (is_multicast_ether_addr(dest)) {
 		if (unlikely(netpoll_tx_running(dev))) {
 			br_flood_deliver(br, skb);
 			goto out;
@@ -74,11 +76,23 @@ out:
 	return NETDEV_TX_OK;
 }
 
+static int br_dev_init(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	br->stats = alloc_percpu(struct br_cpu_netstats);
+	if (!br->stats)
+		return -ENOMEM;
+
+	return 0;
+}
+
 static int br_dev_open(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
-	br_features_recompute(br);
+	netif_carrier_off(dev);
+	netdev_update_features(dev);
 	netif_start_queue(dev);
 	br_stp_enable_bridge(br);
 	br_multicast_open(br);
@@ -94,6 +108,8 @@ static int br_dev_stop(struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
+	netif_carrier_off(dev);
+
 	br_stp_disable_bridge(br);
 	br_multicast_stop(br);
 
@@ -141,7 +157,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	/* remember the MTU in the rtable for PMTU */
-	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
+	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
 #endif
 
 	return 0;
@@ -173,43 +189,11 @@ static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 	strcpy(info->bus_info, "N/A");
 }
 
-static int br_set_sg(struct net_device *dev, u32 data)
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	if (data)
-		br->feature_mask |= NETIF_F_SG;
-	else
-		br->feature_mask &= ~NETIF_F_SG;
-
-	br_features_recompute(br);
-	return 0;
-}
-
-static int br_set_tso(struct net_device *dev, u32 data)
+static u32 br_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_bridge *br = netdev_priv(dev);
 
-	if (data)
-		br->feature_mask |= NETIF_F_TSO;
-	else
-		br->feature_mask &= ~NETIF_F_TSO;
-
-	br_features_recompute(br);
-	return 0;
-}
-
-static int br_set_tx_csum(struct net_device *dev, u32 data)
-{
-	struct net_bridge *br = netdev_priv(dev);
-
-	if (data)
-		br->feature_mask |= NETIF_F_NO_CSUM;
-	else
-		br->feature_mask &= ~NETIF_F_ALL_CSUM;
-
-	br_features_recompute(br);
-	return 0;
+	return br_features_recompute(br, features);
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -261,6 +245,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
 		goto out;
 
 	np->dev = p->dev;
+	strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
 
 	err = __netpoll_setup(np);
 	if (err) {
@@ -292,23 +277,30 @@ void br_netpoll_disable(struct net_bridge_port *p)
 
 #endif
 
+static int br_add_slave(struct net_device *dev, struct net_device *slave_dev)
+
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return br_add_if(br, slave_dev);
+}
+
+static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	return br_del_if(br, slave_dev);
+}
+
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo    = br_getinfo,
 	.get_link	= ethtool_op_get_link,
-	.get_tx_csum	= ethtool_op_get_tx_csum,
-	.set_tx_csum 	= br_set_tx_csum,
-	.get_sg		= ethtool_op_get_sg,
-	.set_sg		= br_set_sg,
-	.get_tso	= ethtool_op_get_tso,
-	.set_tso	= br_set_tso,
-	.get_ufo	= ethtool_op_get_ufo,
-	.set_ufo	= ethtool_op_set_ufo,
-	.get_flags	= ethtool_op_get_flags,
 };
 
 static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
+	.ndo_init		 = br_dev_init,
 	.ndo_start_xmit		 = br_dev_xmit,
 	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
@@ -320,6 +312,9 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
 	.ndo_poll_controller	 = br_poll_controller,
 #endif
+	.ndo_add_slave		 = br_add_slave,
+	.ndo_del_slave		 = br_del_slave,
+	.ndo_fix_features        = br_fix_features,
 };
 
 static void br_dev_free(struct net_device *dev)
@@ -330,18 +325,49 @@ static void br_dev_free(struct net_device *dev)
 	free_netdev(dev);
 }
 
+static struct device_type br_type = {
+	.name	= "bridge",
+};
+
 void br_dev_setup(struct net_device *dev)
 {
+	struct net_bridge *br = netdev_priv(dev);
+
 	random_ether_addr(dev->dev_addr);
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
 	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
+	SET_NETDEV_DEVTYPE(dev, &br_type);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;
 
 	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
 			NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_LLTX |
-			NETIF_F_NETNS_LOCAL | NETIF_F_GSO;
+			NETIF_F_NETNS_LOCAL | NETIF_F_HW_VLAN_TX;
+	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+			   NETIF_F_GSO_MASK | NETIF_F_NO_CSUM |
+			   NETIF_F_HW_VLAN_TX;
+
+	br->dev = dev;
+	spin_lock_init(&br->lock);
+	INIT_LIST_HEAD(&br->port_list);
+	spin_lock_init(&br->hash_lock);
+
+	br->bridge_id.prio[0] = 0x80;
+	br->bridge_id.prio[1] = 0x00;
+
+	memcpy(br->group_addr, br_group_address, ETH_ALEN);
+
+	br->stp_enabled = BR_NO_STP;
+	br->designated_root = br->bridge_id;
+	br->bridge_max_age = br->max_age = 20 * HZ;
+	br->bridge_hello_time = br->hello_time = 2 * HZ;
+	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
+	br->ageing_time = 300 * HZ;
+
+	br_netfilter_rtable_init(br);
+	br_stp_timer_init(br);
+	br_multicast_init(br);
 }
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 90512ccfd3e9..e0dfbc151dd7 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -28,6 +28,7 @@
28static struct kmem_cache *br_fdb_cache __read_mostly; 28static struct kmem_cache *br_fdb_cache __read_mostly;
29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, 29static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
30 const unsigned char *addr); 30 const unsigned char *addr);
31static void fdb_notify(const struct net_bridge_fdb_entry *, int);
31 32
32static u32 fdb_salt __read_mostly; 33static u32 fdb_salt __read_mostly;
33 34
@@ -62,7 +63,7 @@ static inline int has_expired(const struct net_bridge *br,
 				  const struct net_bridge_fdb_entry *fdb)
 {
 	return !fdb->is_static &&
-		time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
+		time_before_eq(fdb->updated + hold_time(br), jiffies);
 }
 
 static inline int br_mac_hash(const unsigned char *mac)
@@ -81,6 +82,7 @@ static void fdb_rcu_free(struct rcu_head *head)
 
 static inline void fdb_delete(struct net_bridge_fdb_entry *f)
 {
+	fdb_notify(f, RTM_DELNEIGH);
 	hlist_del_rcu(&f->hlist);
 	call_rcu(&f->rcu, fdb_rcu_free);
 }
@@ -140,7 +142,7 @@ void br_fdb_cleanup(unsigned long _data)
 			unsigned long this_timer;
 			if (f->is_static)
 				continue;
-			this_timer = f->ageing_timer + delay;
+			this_timer = f->updated + delay;
 			if (time_before_eq(this_timer, jiffies))
 				fdb_delete(f);
 			else if (time_before(this_timer, next_timer))
@@ -169,7 +171,7 @@ void br_fdb_flush(struct net_bridge *br)
 	spin_unlock_bh(&br->hash_lock);
 }
 
-/* Flush all entries refering to a specific port.
+/* Flush all entries referring to a specific port.
  * if do_all is set also flush static entries
  */
 void br_fdb_delete_by_port(struct net_bridge *br,
@@ -238,15 +240,18 @@ struct net_bridge_fdb_entry *__br_fdb_get(struct net_bridge *br,
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 {
 	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *port;
 	int ret;
 
-	if (!br_port_exists(dev))
-		return 0;
-
 	rcu_read_lock();
-	fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
-	ret = fdb && fdb->dst->dev != dev &&
-		fdb->dst->state == BR_STATE_FORWARDING;
+	port = br_port_get_rcu(dev);
+	if (!port)
+		ret = 0;
+	else {
+		fdb = __br_fdb_get(port->br, addr);
+		ret = fdb && fdb->dst->dev != dev &&
+			fdb->dst->state == BR_STATE_FORWARDING;
+	}
 	rcu_read_unlock();
 
 	return ret;
@@ -290,7 +295,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 
 		fe->is_local = f->is_local;
 		if (!f->is_static)
-			fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->ageing_timer);
+			fe->ageing_timer_value = jiffies_to_clock_t(jiffies - f->updated);
 		++fe;
 		++num;
 	}
@@ -302,8 +307,21 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf,
 	return num;
 }
 
-static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
-						     const unsigned char *addr)
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+					     const unsigned char *addr)
+{
+	struct hlist_node *h;
+	struct net_bridge_fdb_entry *fdb;
+
+	hlist_for_each_entry(fdb, h, head, hlist) {
+		if (!compare_ether_addr(fdb->addr.addr, addr))
+			return fdb;
+	}
+	return NULL;
+}
+
+static struct net_bridge_fdb_entry *fdb_find_rcu(struct hlist_head *head,
+						 const unsigned char *addr)
 {
 	struct hlist_node *h;
 	struct net_bridge_fdb_entry *fdb;
@@ -317,20 +335,19 @@ static inline struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
 
 static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
 					       struct net_bridge_port *source,
-					       const unsigned char *addr,
-					       int is_local)
+					       const unsigned char *addr)
 {
 	struct net_bridge_fdb_entry *fdb;
 
 	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
 	if (fdb) {
 		memcpy(fdb->addr.addr, addr, ETH_ALEN);
-		hlist_add_head_rcu(&fdb->hlist, head);
-
 		fdb->dst = source;
-		fdb->is_local = is_local;
-		fdb->is_static = is_local;
-		fdb->ageing_timer = jiffies;
+		fdb->is_local = 0;
+		fdb->is_static = 0;
+		fdb->updated = fdb->used = jiffies;
+		hlist_add_head_rcu(&fdb->hlist, head);
+		fdb_notify(fdb, RTM_NEWNEIGH);
 	}
 	return fdb;
 }
@@ -357,12 +374,15 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		fdb_delete(fdb);
 	}
 
-	if (!fdb_create(head, source, addr, 1))
+	fdb = fdb_create(head, source, addr);
+	if (!fdb)
 		return -ENOMEM;
 
+	fdb->is_local = fdb->is_static = 1;
 	return 0;
 }
 
+/* Add entry for local address of interface */
 int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
 		  const unsigned char *addr)
 {
@@ -389,7 +409,7 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		      source->state == BR_STATE_FORWARDING))
 		return;
 
-	fdb = fdb_find(head, addr);
+	fdb = fdb_find_rcu(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
 		if (unlikely(fdb->is_local)) {
@@ -400,15 +420,277 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		} else {
 			/* fastpath: update of existing entry */
 			fdb->dst = source;
-			fdb->ageing_timer = jiffies;
+			fdb->updated = jiffies;
 		}
 	} else {
 		spin_lock(&br->hash_lock);
-		if (!fdb_find(head, addr))
-			fdb_create(head, source, addr, 0);
+		if (likely(!fdb_find(head, addr)))
+			fdb_create(head, source, addr);
+
 		/* else  we lose race and someone else inserts
 		 * it first, don't bother updating
 		 */
 		spin_unlock(&br->hash_lock);
 	}
 }
+
+static int fdb_to_nud(const struct net_bridge_fdb_entry *fdb)
+{
+	if (fdb->is_local)
+		return NUD_PERMANENT;
+	else if (fdb->is_static)
+		return NUD_NOARP;
+	else if (has_expired(fdb->dst->br, fdb))
+		return NUD_STALE;
+	else
+		return NUD_REACHABLE;
+}
+
+static int fdb_fill_info(struct sk_buff *skb,
+			 const struct net_bridge_fdb_entry *fdb,
+			 u32 pid, u32 seq, int type, unsigned int flags)
+{
+	unsigned long now = jiffies;
+	struct nda_cacheinfo ci;
+	struct nlmsghdr *nlh;
+	struct ndmsg *ndm;
+
+	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+	if (nlh == NULL)
+		return -EMSGSIZE;
+
+
+	ndm = nlmsg_data(nlh);
+	ndm->ndm_family	 = AF_BRIDGE;
+	ndm->ndm_pad1    = 0;
+	ndm->ndm_pad2    = 0;
+	ndm->ndm_flags	 = 0;
+	ndm->ndm_type	 = 0;
+	ndm->ndm_ifindex = fdb->dst->dev->ifindex;
+	ndm->ndm_state   = fdb_to_nud(fdb);
+
+	NLA_PUT(skb, NDA_LLADDR, ETH_ALEN, &fdb->addr);
+
+	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
+	ci.ndm_confirmed = 0;
+	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
+	ci.ndm_refcnt	 = 0;
+	NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+
+	return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+	nlmsg_cancel(skb, nlh);
+	return -EMSGSIZE;
+}
+
+static inline size_t fdb_nlmsg_size(void)
+{
+	return NLMSG_ALIGN(sizeof(struct ndmsg))
+		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
+		+ nla_total_size(sizeof(struct nda_cacheinfo));
+}
+
+static void fdb_notify(const struct net_bridge_fdb_entry *fdb, int type)
+{
+	struct net *net = dev_net(fdb->dst->dev);
+	struct sk_buff *skb;
+	int err = -ENOBUFS;
+
+	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
+	if (skb == NULL)
+		goto errout;
+
+	err = fdb_fill_info(skb, fdb, 0, 0, type, 0);
+	if (err < 0) {
+		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
+		WARN_ON(err == -EMSGSIZE);
+		kfree_skb(skb);
+		goto errout;
+	}
+	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+	return;
+errout:
+	if (err < 0)
+		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+/* Dump information about entries, in response to GETNEIGH */
+int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct net *net = sock_net(skb->sk);
+	struct net_device *dev;
+	int idx = 0;
+
+	rcu_read_lock();
+	for_each_netdev_rcu(net, dev) {
+		struct net_bridge *br = netdev_priv(dev);
+		int i;
+
+		if (!(dev->priv_flags & IFF_EBRIDGE))
+			continue;
+
+		for (i = 0; i < BR_HASH_SIZE; i++) {
+			struct hlist_node *h;
+			struct net_bridge_fdb_entry *f;
+
+			hlist_for_each_entry_rcu(f, h, &br->hash[i], hlist) {
+				if (idx < cb->args[0])
+					goto skip;
+
+				if (fdb_fill_info(skb, f,
+						  NETLINK_CB(cb->skb).pid,
+						  cb->nlh->nlmsg_seq,
+						  RTM_NEWNEIGH,
+						  NLM_F_MULTI) < 0)
+					break;
+skip:
+				++idx;
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	cb->args[0] = idx;
+
+	return skb->len;
+}
+
+/* Create new static fdb entry */
+static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
+			 __u16 state)
+{
+	struct net_bridge *br = source->br;
+	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr);
+	if (fdb)
+		return -EEXIST;
+
+	fdb = fdb_create(head, source, addr);
+	if (!fdb)
+		return -ENOMEM;
+
+	if (state & NUD_PERMANENT)
+		fdb->is_local = fdb->is_static = 1;
+	else if (state & NUD_NOARP)
+		fdb->is_static = 1;
+	return 0;
+}
+
+/* Add new permanent fdb entry with RTM_NEWNEIGH */
+int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ndmsg *ndm;
+	struct nlattr *tb[NDA_MAX+1];
+	struct net_device *dev;
+	struct net_bridge_port *p;
+	const __u8 *addr;
+	int err;
+
+	ASSERT_RTNL();
+	err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+	if (err < 0)
+		return err;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("bridge: RTM_NEWNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	if (!tb[NDA_LLADDR] || nla_len(tb[NDA_LLADDR]) != ETH_ALEN) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(tb[NDA_LLADDR]);
+	if (!is_valid_ether_addr(addr)) {
+		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
+		return -EINVAL;
+	}
+
+	p = br_port_get_rtnl(dev);
+	if (p == NULL) {
+		pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
+			dev->name);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&p->br->hash_lock);
+	err = fdb_add_entry(p, addr, ndm->ndm_state);
+	spin_unlock_bh(&p->br->hash_lock);
+
+	return err;
+}
+
+static int fdb_delete_by_addr(struct net_bridge_port *p, const u8 *addr)
+{
+	struct net_bridge *br = p->br;
+	struct hlist_head *head = &br->hash[br_mac_hash(addr)];
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = fdb_find(head, addr);
+	if (!fdb)
+		return -ENOENT;
+
+	fdb_delete(fdb);
+	return 0;
+}
+
+/* Remove neighbor entry with RTM_DELNEIGH */
+int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	struct net *net = sock_net(skb->sk);
+	struct ndmsg *ndm;
+	struct net_bridge_port *p;
+	struct nlattr *llattr;
+	const __u8 *addr;
+	struct net_device *dev;
+	int err;
+
+	ASSERT_RTNL();
+	if (nlmsg_len(nlh) < sizeof(*ndm))
+		return -EINVAL;
+
+	ndm = nlmsg_data(nlh);
+	if (ndm->ndm_ifindex == 0) {
+		pr_info("bridge: RTM_DELNEIGH with invalid ifindex\n");
+		return -EINVAL;
+	}
+
+	dev = __dev_get_by_index(net, ndm->ndm_ifindex);
+	if (dev == NULL) {
+		pr_info("bridge: RTM_DELNEIGH with unknown ifindex\n");
+		return -ENODEV;
+	}
+
+	llattr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_LLADDR);
+	if (llattr == NULL || nla_len(llattr) != ETH_ALEN) {
+		pr_info("bridge: RTM_DELNEIGH with invalid address\n");
+		return -EINVAL;
+	}
+
+	addr = nla_data(llattr);
+
+	p = br_port_get_rtnl(dev);
+	if (p == NULL) {
+		pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
+			dev->name);
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&p->br->hash_lock);
+	err = fdb_delete_by_addr(p, addr);
+	spin_unlock_bh(&p->br->hash_lock);
+
+	return err;
+}
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index cbfe87f0f34a..ee64287f1290 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -41,17 +41,13 @@ static inline unsigned packet_length(const struct sk_buff *skb)
 
 int br_dev_queue_push_xmit(struct sk_buff *skb)
 {
-	/* drop mtu oversized packets except gso */
-	if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
+	/* ip_fragment doesn't copy the MAC header */
+	if (nf_bridge_maybe_copy_header(skb) ||
+	    (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))) {
 		kfree_skb(skb);
-	else {
-		/* ip_fragment doesn't copy the MAC header */
-		if (nf_bridge_maybe_copy_header(skb))
-			kfree_skb(skb);
-		else {
-			skb_push(skb, ETH_HLEN);
-			dev_queue_xmit(skb);
-		}
+	} else {
+		skb_push(skb, ETH_HLEN);
+		dev_queue_xmit(skb);
 	}
 
 	return 0;
@@ -223,7 +219,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 	struct net_bridge_port_group *p;
 	struct hlist_node *rp;
 
-	rp = rcu_dereference(br->router_list.first);
+	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
 	p = mdst ? rcu_dereference(mdst->ports) : NULL;
 	while (p || rp) {
 		struct net_bridge_port *port, *lport, *rport;
@@ -242,7 +238,7 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 		if ((unsigned long)lport >= (unsigned long)port)
 			p = rcu_dereference(p->next);
 		if ((unsigned long)rport >= (unsigned long)port)
-			rp = rcu_dereference(rp->next);
+			rp = rcu_dereference(hlist_next_rcu(rp));
 	}
 
 	if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index c03d2c3ff03e..1bacca4cb676 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -36,8 +36,8 @@ static int port_cost(struct net_device *dev)
 	if (dev->ethtool_ops && dev->ethtool_ops->get_settings) {
 		struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET, };
 
-		if (!dev->ethtool_ops->get_settings(dev, &ecmd)) {
-			switch(ecmd.speed) {
+		if (!dev_ethtool_get_settings(dev, &ecmd)) {
+			switch (ethtool_cmd_speed(&ecmd)) {
 			case SPEED_10000:
 				return 2;
 			case SPEED_1000:
@@ -61,30 +61,27 @@ static int port_cost(struct net_device *dev)
 }
 
 
-/*
- * Check for port carrier transistions.
- * Called from work queue to allow for calling functions that
- * might sleep (such as speed check), and to debounce.
- */
+/* Check for port carrier transistions. */
 void br_port_carrier_check(struct net_bridge_port *p)
 {
 	struct net_device *dev = p->dev;
 	struct net_bridge *br = p->br;
 
-	if (netif_carrier_ok(dev))
+	if (netif_running(dev) && netif_carrier_ok(dev))
 		p->path_cost = port_cost(dev);
 
-	if (netif_running(br->dev)) {
-		spin_lock_bh(&br->lock);
-		if (netif_carrier_ok(dev)) {
-			if (p->state == BR_STATE_DISABLED)
-				br_stp_enable_port(p);
-		} else {
-			if (p->state != BR_STATE_DISABLED)
-				br_stp_disable_port(p);
-		}
-		spin_unlock_bh(&br->lock);
-	}
+	if (!netif_running(br->dev))
+		return;
+
+	spin_lock_bh(&br->lock);
+	if (netif_running(dev) && netif_carrier_ok(dev)) {
+		if (p->state == BR_STATE_DISABLED)
+			br_stp_enable_port(p);
+	} else {
+		if (p->state != BR_STATE_DISABLED)
+			br_stp_disable_port(p);
+	}
+	spin_unlock_bh(&br->lock);
 }
 
 static void release_nbp(struct kobject *kobj)
@@ -150,6 +147,9 @@ static void del_nbp(struct net_bridge_port *p)
 	dev->priv_flags &= ~IFF_BRIDGE_PORT;
 
 	netdev_rx_handler_unregister(dev);
+	synchronize_net();
+
+	netdev_set_master(dev, NULL);
 
 	br_multicast_del_port(p);
 
@@ -176,56 +176,6 @@ static void del_br(struct net_bridge *br, struct list_head *head)
 	unregister_netdevice_queue(br->dev, head);
 }
 
-static struct net_device *new_bridge_dev(struct net *net, const char *name)
-{
-	struct net_bridge *br;
-	struct net_device *dev;
-
-	dev = alloc_netdev(sizeof(struct net_bridge), name,
-			   br_dev_setup);
-
-	if (!dev)
-		return NULL;
-	dev_net_set(dev, net);
-
-	br = netdev_priv(dev);
-	br->dev = dev;
-
-	br->stats = alloc_percpu(struct br_cpu_netstats);
-	if (!br->stats) {
-		free_netdev(dev);
-		return NULL;
-	}
-
-	spin_lock_init(&br->lock);
-	INIT_LIST_HEAD(&br->port_list);
-	spin_lock_init(&br->hash_lock);
-
-	br->bridge_id.prio[0] = 0x80;
-	br->bridge_id.prio[1] = 0x00;
-
-	memcpy(br->group_addr, br_group_address, ETH_ALEN);
-
-	br->feature_mask = dev->features;
-	br->stp_enabled = BR_NO_STP;
-	br->designated_root = br->bridge_id;
-	br->root_path_cost = 0;
-	br->root_port = 0;
-	br->bridge_max_age = br->max_age = 20 * HZ;
-	br->bridge_hello_time = br->hello_time = 2 * HZ;
-	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
-	br->topology_change = 0;
-	br->topology_change_detected = 0;
-	br->ageing_time = 300 * HZ;
-
-	br_netfilter_rtable_init(br);
-
-	br_stp_timer_init(br);
-	br_multicast_init(br);
-
-	return dev;
-}
-
 /* find an available port number */
 static int find_portno(struct net_bridge *br)
 {
@@ -278,42 +228,19 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	return p;
 }
 
-static struct device_type br_type = {
-	.name	= "bridge",
-};
-
 int br_add_bridge(struct net *net, const char *name)
 {
 	struct net_device *dev;
-	int ret;
 
-	dev = new_bridge_dev(net, name);
+	dev = alloc_netdev(sizeof(struct net_bridge), name,
+			   br_dev_setup);
+
 	if (!dev)
 		return -ENOMEM;
 
-	rtnl_lock();
-	if (strchr(dev->name, '%')) {
-		ret = dev_alloc_name(dev, dev->name);
-		if (ret < 0)
-			goto out_free;
-	}
-
-	SET_NETDEV_DEVTYPE(dev, &br_type);
-
-	ret = register_netdevice(dev);
-	if (ret)
-		goto out_free;
-
-	ret = br_sysfs_addbr(dev);
-	if (ret)
-		unregister_netdevice(dev);
- out:
-	rtnl_unlock();
-	return ret;
+	dev_net_set(dev, net);
 
-out_free:
-	free_netdev(dev);
-	goto out;
+	return register_netdev(dev);
 }
 
 int br_del_bridge(struct net *net, const char *name)
@@ -365,15 +292,15 @@ int br_min_mtu(const struct net_bridge *br)
 /*
  * Recomputes features using slave's features
  */
-void br_features_recompute(struct net_bridge *br)
+u32 br_features_recompute(struct net_bridge *br, u32 features)
 {
 	struct net_bridge_port *p;
-	unsigned long features, mask;
+	u32 mask;
 
-	features = mask = br->feature_mask;
 	if (list_empty(&br->port_list))
-		goto done;
+		return features;
 
+	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
 
 	list_for_each_entry(p, &br->port_list, list) {
@@ -381,8 +308,7 @@ void br_features_recompute(struct net_bridge *br)
 					p->dev->features, mask);
 	}
 
-done:
-	br->dev->features = netdev_fix_features(features, NULL);
+	return features;
 }
 
 /* called with RTNL */
@@ -390,6 +316,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 {
 	struct net_bridge_port *p;
 	int err = 0;
+	bool changed_addr;
 
 	/* Don't allow bridging non-ethernet like devices */
 	if ((dev->flags & IFF_LOOPBACK) ||
@@ -412,6 +339,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (IS_ERR(p))
 		return PTR_ERR(p);
 
+	call_netdevice_notifiers(NETDEV_JOIN, dev);
+
 	err = dev_set_promiscuity(dev, 1);
 	if (err)
 		goto put_back;
@@ -432,19 +361,24 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (br_netpoll_info(br) && ((err = br_netpoll_enable(p))))
 		goto err3;
 
-	err = netdev_rx_handler_register(dev, br_handle_frame, p);
+	err = netdev_set_master(dev, br->dev);
 	if (err)
 		goto err3;
 
+	err = netdev_rx_handler_register(dev, br_handle_frame, p);
+	if (err)
+		goto err4;
+
 	dev->priv_flags |= IFF_BRIDGE_PORT;
 
 	dev_disable_lro(dev);
 
 	list_add_rcu(&p->list, &br->port_list);
 
+	netdev_update_features(br->dev);
+
 	spin_lock_bh(&br->lock);
-	br_stp_recalculate_bridge_id(br);
-	br_features_recompute(br);
+	changed_addr = br_stp_recalculate_bridge_id(br);
 
 	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
 	    (br->dev->flags & IFF_UP))
@@ -453,11 +387,17 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	br_ifinfo_notify(RTM_NEWLINK, p);
 
+	if (changed_addr)
+		call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+
 	dev_set_mtu(br->dev, br_min_mtu(br));
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
 	return 0;
+
+err4:
+	netdev_set_master(dev, NULL);
 err3:
 	sysfs_remove_link(br->ifobj, p->dev->name);
 err2:
@@ -478,20 +418,18 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
 {
 	struct net_bridge_port *p;
 
-	if (!br_port_exists(dev))
-		return -EINVAL;
-
-	p = br_port_get(dev);
-	if (p->br != br)
+	p = br_port_get_rtnl(dev);
+	if (!p || p->br != br)
 		return -EINVAL;
 
 	del_nbp(p);
 
 	spin_lock_bh(&br->lock);
 	br_stp_recalculate_bridge_id(br);
-	br_features_recompute(br);
 	spin_unlock_bh(&br->lock);
 
+	netdev_update_features(br->dev);
+
 	return 0;
 }
 
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 826cd5221536..f06ee39c73fd 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -21,6 +21,10 @@
 /* Bridge group multicast address 802.1d (pg 51). */
 const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
+/* Hook for brouter */
+br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
+EXPORT_SYMBOL(br_should_route_hook);
+
 static int br_pass_frame_up(struct sk_buff *skb)
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -56,7 +60,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	br = p->br;
 	br_fdb_update(br, p, eth_hdr(skb)->h_source);
 
-	if (is_multicast_ether_addr(dest) &&
+	if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
 	    br_multicast_rcv(br, p, skb))
 		goto drop;
 
@@ -73,10 +77,12 @@ int br_handle_frame_finish(struct sk_buff *skb)
 
 	dst = NULL;
 
-	if (is_multicast_ether_addr(dest)) {
+	if (is_broadcast_ether_addr(dest))
+		skb2 = skb;
+	else if (is_multicast_ether_addr(dest)) {
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
-			if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
+			if ((mdst && mdst->mglist) ||
 			    br_multicast_is_router(br))
 				skb2 = skb;
 			br_multicast_forward(mdst, skb, skb2);
@@ -94,9 +100,10 @@ int br_handle_frame_finish(struct sk_buff *skb)
 	}
 
 	if (skb) {
-		if (dst)
+		if (dst) {
+			dst->used = jiffies;
 			br_forward(dst->dst, skb, skb2);
-		else
+		} else
 			br_flood_forward(br, skb, skb2);
 	}
 
@@ -135,21 +142,22 @@ static inline int is_link_local(const unsigned char *dest)
  * Return NULL if skb is handled
  * note: already called with rcu_read_lock
  */
-struct sk_buff *br_handle_frame(struct sk_buff *skb)
+rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
 {
 	struct net_bridge_port *p;
+	struct sk_buff *skb = *pskb;
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
-	int (*rhook)(struct sk_buff *skb);
+	br_should_route_hook_t *rhook;
 
-	if (skb->pkt_type == PACKET_LOOPBACK)
-		return skb;
+	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+		return RX_HANDLER_PASS;
 
 	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
 		goto drop;
 
 	skb = skb_share_check(skb, GFP_ATOMIC);
 	if (!skb)
-		return NULL;
+		return RX_HANDLER_CONSUMED;
 
 	p = br_port_get_rcu(skb->dev);
 
@@ -163,19 +171,23 @@ struct sk_buff *br_handle_frame(struct sk_buff *skb)
 			goto forward;
 
 		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
-			    NULL, br_handle_local_finish))
-			return NULL;	/* frame consumed by filter */
-		else
-			return skb;	/* continue processing */
+			    NULL, br_handle_local_finish)) {
+			return RX_HANDLER_CONSUMED; /* consumed by filter */
+		} else {
+			*pskb = skb;
+			return RX_HANDLER_PASS;	/* continue processing */
+		}
 	}
 
 forward:
 	switch (p->state) {
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
-		if (rhook != NULL) {
-			if (rhook(skb))
-				return skb;
+		if (rhook) {
+			if ((*rhook)(skb)) {
+				*pskb = skb;
+				return RX_HANDLER_PASS;
+			}
 			dest = eth_hdr(skb)->h_dest;
 		}
 		/* fall through */
@@ -190,5 +202,5 @@ forward:
 drop:
 		kfree_skb(skb);
 	}
-	return NULL;
+	return RX_HANDLER_CONSUMED;
 }
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index cb43312b846e..7222fe1d5460 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -106,7 +106,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
 /*
  * Legacy ioctl's through SIOCDEVPRIVATE
  * This interface is deprecated because it was too difficult to
- * to do the translation for 32/64bit ioctl compatability.
+ * to do the translation for 32/64bit ioctl compatibility.
  */
 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
@@ -181,40 +181,19 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&br->lock);
-		br->bridge_forward_delay = clock_t_to_jiffies(args[1]);
-		if (br_is_root_bridge(br))
-			br->forward_delay = br->bridge_forward_delay;
-		spin_unlock_bh(&br->lock);
-		return 0;
+		return br_set_forward_delay(br, args[1]);
 
 	case BRCTL_SET_BRIDGE_HELLO_TIME:
-	{
-		unsigned long t = clock_t_to_jiffies(args[1]);
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		if (t < HZ)
-			return -EINVAL;
-
-		spin_lock_bh(&br->lock);
-		br->bridge_hello_time = t;
-		if (br_is_root_bridge(br))
-			br->hello_time = br->bridge_hello_time;
-		spin_unlock_bh(&br->lock);
-		return 0;
-	}
+		return br_set_hello_time(br, args[1]);
 
 	case BRCTL_SET_BRIDGE_MAX_AGE:
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&br->lock);
-		br->bridge_max_age = clock_t_to_jiffies(args[1]);
-		if (br_is_root_bridge(br))
-			br->max_age = br->bridge_max_age;
-		spin_unlock_bh(&br->lock);
-		return 0;
+		return br_set_max_age(br, args[1]);
 
 	case BRCTL_SET_AGEING_TIME:
 		if (!capable(CAP_NET_ADMIN))
@@ -275,19 +254,16 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	case BRCTL_SET_PORT_PRIORITY:
 	{
 		struct net_bridge_port *p;
-		int ret = 0;
+		int ret;
 
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		if (args[2] >= (1<<(16-BR_PORT_BITS)))
-			return -ERANGE;
-
 		spin_lock_bh(&br->lock);
 		if ((p = br_get_port(br, args[1])) == NULL)
 			ret = -EINVAL;
 		else
-			br_stp_set_port_priority(p, args[2]);
+			ret = br_stp_set_port_priority(p, args[2]);
 		spin_unlock_bh(&br->lock);
 		return ret;
 	}
@@ -295,15 +271,17 @@ static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 	case BRCTL_SET_PATH_COST:
 	{
 		struct net_bridge_port *p;
-		int ret = 0;
+		int ret;
 
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
+		spin_lock_bh(&br->lock);
 		if ((p = br_get_port(br, args[1])) == NULL)
 			ret = -EINVAL;
 		else
-			br_stp_set_path_cost(p, args[2]);
+			ret = br_stp_set_path_cost(p, args[2]);
+		spin_unlock_bh(&br->lock);
 
 		return ret;
 	}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eb5b256ffc88..2d85ca7111d3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,11 +33,13 @@
 
 #include "br_private.h"
 
+#define mlock_dereference(X, br) \
+	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
+static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
 {
-	if (ipv6_addr_is_multicast(addr) &&
-	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+	if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
 		return 1;
 	return 0;
 }
@@ -135,7 +137,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
 	struct br_ip ip;
 
 	if (br->multicast_disabled)
@@ -229,13 +231,13 @@ static void br_multicast_group_expired(unsigned long data)
 	if (!netif_running(br->dev) || timer_pending(&mp->timer))
 		goto out;
 
-	if (!hlist_unhashed(&mp->mglist))
-		hlist_del_init(&mp->mglist);
+	mp->mglist = false;
 
 	if (mp->ports)
 		goto out;
 
-	mdb = br->mdb;
+	mdb = mlock_dereference(br->mdb, br);
+
 	hlist_del_rcu(&mp->hlist[mdb->ver]);
 	mdb->size--;
 
@@ -249,16 +251,20 @@ out:
 static void br_multicast_del_pg(struct net_bridge *br,
 				struct net_bridge_port_group *pg)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
+
+	mdb = mlock_dereference(br->mdb, br);
 
 	mp = br_mdb_ip_get(mdb, &pg->addr);
 	if (WARN_ON(!mp))
 		return;
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (p != pg)
 			continue;
 
@@ -268,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		del_timer(&p->query_timer);
 		call_rcu_bh(&p->rcu, br_multicast_free_pg);
 
-		if (!mp->ports && hlist_unhashed(&mp->mglist) &&
+		if (!mp->ports && !mp->mglist &&
 		    netif_running(br->dev))
 			mod_timer(&mp->timer, jiffies);
 
@@ -294,10 +300,10 @@ out:
 	spin_unlock(&br->multicast_lock);
 }
 
-static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
+static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
 			 int elasticity)
 {
-	struct net_bridge_mdb_htable *old = *mdbp;
+	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
 	struct net_bridge_mdb_htable *mdb;
 	int err;
 
@@ -407,7 +413,7 @@ out:
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
-						    struct in6_addr *group)
+						    const struct in6_addr *group)
 {
 	struct sk_buff *skb;
 	struct ipv6hdr *ip6h;
@@ -428,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	eth = eth_hdr(skb);
 
 	memcpy(eth->h_source, br->dev->dev_addr, 6);
-	ipv6_eth_mc_map(group, eth->h_dest);
 	eth->h_proto = htons(ETH_P_IPV6);
 	skb_put(skb, sizeof(*eth));
 
@@ -437,11 +442,13 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
 	ip6h = ipv6_hdr(skb);
 
 	*(__force __be32 *)ip6h = htonl(0x60000000);
-	ip6h->payload_len = 8 + sizeof(*mldq);
+	ip6h->payload_len = htons(8 + sizeof(*mldq));
 	ip6h->nexthdr = IPPROTO_HOPOPTS;
 	ip6h->hop_limit = 1;
-	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
 	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+	ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+			   &ip6h->saddr);
+	ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
 	hopopt = (u8 *)(ip6h + 1);
 	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
@@ -520,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
 	struct net_bridge *br = mp->br;
 
 	spin_lock(&br->multicast_lock);
-	if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
+	if (!netif_running(br->dev) || !mp->mglist ||
 	    mp->queries_sent >= br->multicast_last_member_count)
 		goto out;
 
@@ -569,7 +576,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group, int hash)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
 	unsigned count = 0;
@@ -577,6 +584,7 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 	int elasticity;
 	int err;
 
+	mdb = rcu_dereference_protected(br->mdb, 1);
 	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
 		count++;
 		if (unlikely(br_ip_equal(group, &mp->addr)))
@@ -642,13 +650,16 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	int hash;
+	int err;
 
+	mdb = rcu_dereference_protected(br->mdb, 1);
 	if (!mdb) {
-		if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
-			return NULL;
+		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
+		if (err)
+			return ERR_PTR(err);
 		goto rehash;
 	}
 
@@ -660,7 +671,7 @@ static struct net_bridge_mdb_entry *br_multicast_new_group(
 
 	case -EAGAIN:
 rehash:
-		mdb = br->mdb;
+		mdb = rcu_dereference_protected(br->mdb, 1);
 		hash = br_ip_hash(mdb, group);
 		break;
 
@@ -670,7 +681,7 @@ rehash:
 
 	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
 	if (unlikely(!mp))
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	mp->br = br;
 	mp->addr = *group;
@@ -692,7 +703,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
 	unsigned long now = jiffies;
 	int err;
 
@@ -703,16 +714,18 @@ static int br_multicast_add_group(struct net_bridge *br,
 
 	mp = br_multicast_new_group(br, port, group);
 	err = PTR_ERR(mp);
-	if (unlikely(IS_ERR(mp) || !mp))
+	if (IS_ERR(mp))
 		goto err;
 
 	if (!port) {
-		hlist_add_head(&mp->mglist, &br->mglist);
+		mp->mglist = true;
 		mod_timer(&mp->timer, now + br->multicast_membership_interval);
 		goto out;
 	}
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (p->port == port)
 			goto found;
 		if ((unsigned long)p->port < (unsigned long)port)
@@ -767,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
 {
 	struct br_ip br_group;
 
-	if (ipv6_is_local_multicast(group))
+	if (!ipv6_is_transient_multicast(group))
 		return 0;
 
 	ipv6_addr_copy(&br_group.u.ip6, group);
-	br_group.proto = htons(ETH_P_IP);
+	br_group.proto = htons(ETH_P_IPV6);
 
 	return br_multicast_add_group(br, port, &br_group);
 }
@@ -1000,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
 		nsrcs = skb_header_pointer(skb,
 					   len + offsetof(struct mld2_grec,
-							  grec_mca),
+							  grec_nsrcs),
 					   sizeof(_nsrcs), &_nsrcs);
 		if (!nsrcs)
 			return -EINVAL;
 
 		if (!pskb_may_pull(skb,
 				   len + sizeof(*grec) +
-				   sizeof(struct in6_addr) * (*nsrcs)))
+				   sizeof(struct in6_addr) * ntohs(*nsrcs)))
 			return -EINVAL;
 
 		grec = (struct mld2_grec *)(skb->data + len);
-		len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+		len += sizeof(*grec) +
+		       sizeof(struct in6_addr) * ntohs(*nsrcs);
 
 		/* We treat these as MLDv1 reports for now. */
 		switch (grec->grec_type) {
@@ -1101,12 +1115,12 @@ static int br_ip4_multicast_query(struct net_bridge *br,
 				  struct net_bridge_port *port,
 				  struct sk_buff *skb)
 {
-	struct iphdr *iph = ip_hdr(skb);
+	const struct iphdr *iph = ip_hdr(skb);
 	struct igmphdr *ih = igmp_hdr(skb);
 	struct net_bridge_mdb_entry *mp;
 	struct igmpv3_query *ih3;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
 	__be32 group;
@@ -1145,23 +1159,25 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1145 if (!group) 1159 if (!group)
1146 goto out; 1160 goto out;
1147 1161
1148 mp = br_mdb_ip4_get(br->mdb, group); 1162 mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
1149 if (!mp) 1163 if (!mp)
1150 goto out; 1164 goto out;
1151 1165
1152 max_delay *= br->multicast_last_member_count; 1166 max_delay *= br->multicast_last_member_count;
1153 1167
1154 if (!hlist_unhashed(&mp->mglist) && 1168 if (mp->mglist &&
1155 (timer_pending(&mp->timer) ? 1169 (timer_pending(&mp->timer) ?
1156 time_after(mp->timer.expires, now + max_delay) : 1170 time_after(mp->timer.expires, now + max_delay) :
1157 try_to_del_timer_sync(&mp->timer) >= 0)) 1171 try_to_del_timer_sync(&mp->timer) >= 0))
1158 mod_timer(&mp->timer, now + max_delay); 1172 mod_timer(&mp->timer, now + max_delay);
1159 1173
1160 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1174 for (pp = &mp->ports;
1175 (p = mlock_dereference(*pp, br)) != NULL;
1176 pp = &p->next) {
1161 if (timer_pending(&p->timer) ? 1177 if (timer_pending(&p->timer) ?
1162 time_after(p->timer.expires, now + max_delay) : 1178 time_after(p->timer.expires, now + max_delay) :
1163 try_to_del_timer_sync(&p->timer) >= 0) 1179 try_to_del_timer_sync(&p->timer) >= 0)
1164 mod_timer(&mp->timer, now + max_delay); 1180 mod_timer(&p->timer, now + max_delay);
1165 } 1181 }
1166 1182
1167out: 1183out:
@@ -1174,14 +1190,15 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1174 struct net_bridge_port *port, 1190 struct net_bridge_port *port,
1175 struct sk_buff *skb) 1191 struct sk_buff *skb)
1176{ 1192{
1177 struct ipv6hdr *ip6h = ipv6_hdr(skb); 1193 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1178 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); 1194 struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
1179 struct net_bridge_mdb_entry *mp; 1195 struct net_bridge_mdb_entry *mp;
1180 struct mld2_query *mld2q; 1196 struct mld2_query *mld2q;
1181 struct net_bridge_port_group *p, **pp; 1197 struct net_bridge_port_group *p;
1198 struct net_bridge_port_group __rcu **pp;
1182 unsigned long max_delay; 1199 unsigned long max_delay;
1183 unsigned long now = jiffies; 1200 unsigned long now = jiffies;
1184 struct in6_addr *group = NULL; 1201 const struct in6_addr *group = NULL;
1185 int err = 0; 1202 int err = 0;
1186 1203
1187 spin_lock(&br->multicast_lock); 1204 spin_lock(&br->multicast_lock);
@@ -1214,22 +1231,24 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1214 if (!group) 1231 if (!group)
1215 goto out; 1232 goto out;
1216 1233
1217 mp = br_mdb_ip6_get(br->mdb, group); 1234 mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
1218 if (!mp) 1235 if (!mp)
1219 goto out; 1236 goto out;
1220 1237
1221 max_delay *= br->multicast_last_member_count; 1238 max_delay *= br->multicast_last_member_count;
1222 if (!hlist_unhashed(&mp->mglist) && 1239 if (mp->mglist &&
1223 (timer_pending(&mp->timer) ? 1240 (timer_pending(&mp->timer) ?
1224 time_after(mp->timer.expires, now + max_delay) : 1241 time_after(mp->timer.expires, now + max_delay) :
1225 try_to_del_timer_sync(&mp->timer) >= 0)) 1242 try_to_del_timer_sync(&mp->timer) >= 0))
1226 mod_timer(&mp->timer, now + max_delay); 1243 mod_timer(&mp->timer, now + max_delay);
1227 1244
1228 for (pp = &mp->ports; (p = *pp); pp = &p->next) { 1245 for (pp = &mp->ports;
1246 (p = mlock_dereference(*pp, br)) != NULL;
1247 pp = &p->next) {
1229 if (timer_pending(&p->timer) ? 1248 if (timer_pending(&p->timer) ?
1230 time_after(p->timer.expires, now + max_delay) : 1249 time_after(p->timer.expires, now + max_delay) :
1231 try_to_del_timer_sync(&p->timer) >= 0) 1250 try_to_del_timer_sync(&p->timer) >= 0)
1232 mod_timer(&mp->timer, now + max_delay); 1251 mod_timer(&p->timer, now + max_delay);
1233 } 1252 }
1234 1253
1235out: 1254out:
@@ -1254,7 +1273,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1254 timer_pending(&br->multicast_querier_timer)) 1273 timer_pending(&br->multicast_querier_timer))
1255 goto out; 1274 goto out;
1256 1275
1257 mdb = br->mdb; 1276 mdb = mlock_dereference(br->mdb, br);
1258 mp = br_mdb_ip_get(mdb, group); 1277 mp = br_mdb_ip_get(mdb, group);
1259 if (!mp) 1278 if (!mp)
1260 goto out; 1279 goto out;
@@ -1264,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1264 br->multicast_last_member_interval; 1283 br->multicast_last_member_interval;
1265 1284
1266 if (!port) { 1285 if (!port) {
1267 if (!hlist_unhashed(&mp->mglist) && 1286 if (mp->mglist &&
1268 (timer_pending(&mp->timer) ? 1287 (timer_pending(&mp->timer) ?
1269 time_after(mp->timer.expires, time) : 1288 time_after(mp->timer.expires, time) :
1270 try_to_del_timer_sync(&mp->timer) >= 0)) { 1289 try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1277,7 +1296,9 @@ static void br_multicast_leave_group(struct net_bridge *br,
1277 goto out; 1296 goto out;
1278 } 1297 }
1279 1298
1280 for (p = mp->ports; p; p = p->next) { 1299 for (p = mlock_dereference(mp->ports, br);
1300 p != NULL;
1301 p = mlock_dereference(p->next, br)) {
1281 if (p->port != port) 1302 if (p->port != port)
1282 continue; 1303 continue;
1283 1304
@@ -1320,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1320{ 1341{
1321 struct br_ip br_group; 1342 struct br_ip br_group;
1322 1343
1323 if (ipv6_is_local_multicast(group)) 1344 if (!ipv6_is_transient_multicast(group))
1324 return; 1345 return;
1325 1346
1326 ipv6_addr_copy(&br_group.u.ip6, group); 1347 ipv6_addr_copy(&br_group.u.ip6, group);
@@ -1335,7 +1356,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1335 struct sk_buff *skb) 1356 struct sk_buff *skb)
1336{ 1357{
1337 struct sk_buff *skb2 = skb; 1358 struct sk_buff *skb2 = skb;
1338 struct iphdr *iph; 1359 const struct iphdr *iph;
1339 struct igmphdr *ih; 1360 struct igmphdr *ih;
1340 unsigned len; 1361 unsigned len;
1341 unsigned offset; 1362 unsigned offset;
@@ -1358,8 +1379,11 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1358 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1379 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1359 return -EINVAL; 1380 return -EINVAL;
1360 1381
1361 if (iph->protocol != IPPROTO_IGMP) 1382 if (iph->protocol != IPPROTO_IGMP) {
1383 if ((iph->daddr & IGMP_LOCAL_GROUP_MASK) != IGMP_LOCAL_GROUP)
1384 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1362 return 0; 1385 return 0;
1386 }
1363 1387
1364 len = ntohs(iph->tot_len); 1388 len = ntohs(iph->tot_len);
1365 if (skb->len < len || len < ip_hdrlen(skb)) 1389 if (skb->len < len || len < ip_hdrlen(skb))
@@ -1403,7 +1427,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1403 switch (ih->type) { 1427 switch (ih->type) {
1404 case IGMP_HOST_MEMBERSHIP_REPORT: 1428 case IGMP_HOST_MEMBERSHIP_REPORT:
1405 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1429 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1406 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1430 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1407 err = br_ip4_multicast_add_group(br, port, ih->group); 1431 err = br_ip4_multicast_add_group(br, port, ih->group);
1408 break; 1432 break;
1409 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1433 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1430,8 +1454,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1430 struct net_bridge_port *port, 1454 struct net_bridge_port *port,
1431 struct sk_buff *skb) 1455 struct sk_buff *skb)
1432{ 1456{
1433 struct sk_buff *skb2 = skb; 1457 struct sk_buff *skb2;
1434 struct ipv6hdr *ip6h; 1458 const struct ipv6hdr *ip6h;
1435 struct icmp6hdr *icmp6h; 1459 struct icmp6hdr *icmp6h;
1436 u8 nexthdr; 1460 u8 nexthdr;
1437 unsigned len; 1461 unsigned len;
@@ -1454,7 +1478,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1454 ip6h->payload_len == 0) 1478 ip6h->payload_len == 0)
1455 return 0; 1479 return 0;
1456 1480
1457 len = ntohs(ip6h->payload_len); 1481 len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
1458 if (skb->len < len) 1482 if (skb->len < len)
1459 return -EINVAL; 1483 return -EINVAL;
1460 1484
@@ -1469,15 +1493,15 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1469 if (!skb2) 1493 if (!skb2)
1470 return -ENOMEM; 1494 return -ENOMEM;
1471 1495
1496 err = -EINVAL;
1497 if (!pskb_may_pull(skb2, offset + sizeof(struct icmp6hdr)))
1498 goto out;
1499
1472 len -= offset - skb_network_offset(skb2); 1500 len -= offset - skb_network_offset(skb2);
1473 1501
1474 __skb_pull(skb2, offset); 1502 __skb_pull(skb2, offset);
1475 skb_reset_transport_header(skb2); 1503 skb_reset_transport_header(skb2);
1476 1504
1477 err = -EINVAL;
1478 if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
1479 goto out;
1480
1481 icmp6h = icmp6_hdr(skb2); 1505 icmp6h = icmp6_hdr(skb2);
1482 1506
1483 switch (icmp6h->icmp6_type) { 1507 switch (icmp6h->icmp6_type) {
@@ -1516,8 +1540,13 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1516 switch (icmp6h->icmp6_type) { 1540 switch (icmp6h->icmp6_type) {
1517 case ICMPV6_MGM_REPORT: 1541 case ICMPV6_MGM_REPORT:
1518 { 1542 {
1519 struct mld_msg *mld = (struct mld_msg *)icmp6h; 1543 struct mld_msg *mld;
1520 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1544 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1545 err = -EINVAL;
1546 goto out;
1547 }
1548 mld = (struct mld_msg *)skb_transport_header(skb2);
1549 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1521 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1550 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1522 break; 1551 break;
1523 } 1552 }
@@ -1529,15 +1558,18 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1529 break; 1558 break;
1530 case ICMPV6_MGM_REDUCTION: 1559 case ICMPV6_MGM_REDUCTION:
1531 { 1560 {
1532 struct mld_msg *mld = (struct mld_msg *)icmp6h; 1561 struct mld_msg *mld;
1562 if (!pskb_may_pull(skb2, sizeof(*mld))) {
1563 err = -EINVAL;
1564 goto out;
1565 }
1566 mld = (struct mld_msg *)skb_transport_header(skb2);
1533 br_ip6_multicast_leave_group(br, port, &mld->mld_mca); 1567 br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
1534 } 1568 }
1535 } 1569 }
1536 1570
1537out: 1571out:
1538 __skb_push(skb2, offset); 1572 kfree_skb(skb2);
1539 if (skb2 != skb)
1540 kfree_skb(skb2);
1541 return err; 1573 return err;
1542} 1574}
1543#endif 1575#endif
@@ -1625,7 +1657,7 @@ void br_multicast_stop(struct net_bridge *br)
1625 del_timer_sync(&br->multicast_query_timer); 1657 del_timer_sync(&br->multicast_query_timer);
1626 1658
1627 spin_lock_bh(&br->multicast_lock); 1659 spin_lock_bh(&br->multicast_lock);
1628 mdb = br->mdb; 1660 mdb = mlock_dereference(br->mdb, br);
1629 if (!mdb) 1661 if (!mdb)
1630 goto out; 1662 goto out;
1631 1663
@@ -1729,6 +1761,7 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1729{ 1761{
1730 struct net_bridge_port *port; 1762 struct net_bridge_port *port;
1731 int err = 0; 1763 int err = 0;
1764 struct net_bridge_mdb_htable *mdb;
1732 1765
1733 spin_lock(&br->multicast_lock); 1766 spin_lock(&br->multicast_lock);
1734 if (br->multicast_disabled == !val) 1767 if (br->multicast_disabled == !val)
@@ -1741,15 +1774,16 @@ int br_multicast_toggle(struct net_bridge *br, unsigned long val)
1741 if (!netif_running(br->dev)) 1774 if (!netif_running(br->dev))
1742 goto unlock; 1775 goto unlock;
1743 1776
1744 if (br->mdb) { 1777 mdb = mlock_dereference(br->mdb, br);
1745 if (br->mdb->old) { 1778 if (mdb) {
1779 if (mdb->old) {
1746 err = -EEXIST; 1780 err = -EEXIST;
1747rollback: 1781rollback:
1748 br->multicast_disabled = !!val; 1782 br->multicast_disabled = !!val;
1749 goto unlock; 1783 goto unlock;
1750 } 1784 }
1751 1785
1752 err = br_mdb_rehash(&br->mdb, br->mdb->max, 1786 err = br_mdb_rehash(&br->mdb, mdb->max,
1753 br->hash_elasticity); 1787 br->hash_elasticity);
1754 if (err) 1788 if (err)
1755 goto rollback; 1789 goto rollback;
@@ -1774,6 +1808,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1774{ 1808{
1775 int err = -ENOENT; 1809 int err = -ENOENT;
1776 u32 old; 1810 u32 old;
1811 struct net_bridge_mdb_htable *mdb;
1777 1812
1778 spin_lock(&br->multicast_lock); 1813 spin_lock(&br->multicast_lock);
1779 if (!netif_running(br->dev)) 1814 if (!netif_running(br->dev))
@@ -1782,7 +1817,9 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1782 err = -EINVAL; 1817 err = -EINVAL;
1783 if (!is_power_of_2(val)) 1818 if (!is_power_of_2(val))
1784 goto unlock; 1819 goto unlock;
1785 if (br->mdb && val < br->mdb->size) 1820
1821 mdb = mlock_dereference(br->mdb, br);
1822 if (mdb && val < mdb->size)
1786 goto unlock; 1823 goto unlock;
1787 1824
1788 err = 0; 1825 err = 0;
@@ -1790,8 +1827,8 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
1790 old = br->hash_max; 1827 old = br->hash_max;
1791 br->hash_max = val; 1828 br->hash_max = val;
1792 1829
1793 if (br->mdb) { 1830 if (mdb) {
1794 if (br->mdb->old) { 1831 if (mdb->old) {
1795 err = -EEXIST; 1832 err = -EEXIST;
1796rollback: 1833rollback:
1797 br->hash_max = old; 1834 br->hash_max = old;
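A note on the accessor used throughout the br_multicast.c hunks above: br->mdb and the port-group lists become __rcu-annotated in this series, and every write-side dereference goes through mlock_dereference(). Its definition is not part of the hunks shown here; as a rough sketch (assuming, as the usage suggests, that it lives in br_private.h and keys off br->multicast_lock), it is simply rcu_dereference_protected() with a lockdep condition:

#define mlock_dereference(X, br) \
	rcu_dereference_protected(X, lockdep_is_held(&(br)->multicast_lock))

Read-side paths keep using rcu_dereference() under rcu_read_lock(); the macro only documents, and lets lockdep/sparse verify, that the write-side paths hold the multicast lock.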
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 137f23259a93..56149ec36d7f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -64,22 +64,24 @@ static int brnf_filter_pppoe_tagged __read_mostly = 0;
64 64
65static inline __be16 vlan_proto(const struct sk_buff *skb) 65static inline __be16 vlan_proto(const struct sk_buff *skb)
66{ 66{
67 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto; 67 if (vlan_tx_tag_present(skb))
68 return skb->protocol;
69 else if (skb->protocol == htons(ETH_P_8021Q))
70 return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
71 else
72 return 0;
68} 73}
69 74
70#define IS_VLAN_IP(skb) \ 75#define IS_VLAN_IP(skb) \
71 (skb->protocol == htons(ETH_P_8021Q) && \ 76 (vlan_proto(skb) == htons(ETH_P_IP) && \
72 vlan_proto(skb) == htons(ETH_P_IP) && \
73 brnf_filter_vlan_tagged) 77 brnf_filter_vlan_tagged)
74 78
75#define IS_VLAN_IPV6(skb) \ 79#define IS_VLAN_IPV6(skb) \
76 (skb->protocol == htons(ETH_P_8021Q) && \ 80 (vlan_proto(skb) == htons(ETH_P_IPV6) && \
77 vlan_proto(skb) == htons(ETH_P_IPV6) &&\
78 brnf_filter_vlan_tagged) 81 brnf_filter_vlan_tagged)
79 82
80#define IS_VLAN_ARP(skb) \ 83#define IS_VLAN_ARP(skb) \
81 (skb->protocol == htons(ETH_P_8021Q) && \ 84 (vlan_proto(skb) == htons(ETH_P_ARP) && \
82 vlan_proto(skb) == htons(ETH_P_ARP) && \
83 brnf_filter_vlan_tagged) 85 brnf_filter_vlan_tagged)
84 86
85static inline __be16 pppoe_proto(const struct sk_buff *skb) 87static inline __be16 pppoe_proto(const struct sk_buff *skb)
@@ -102,11 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
102{ 104{
103} 105}
104 106
107static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
108{
109 return NULL;
110}
111
105static struct dst_ops fake_dst_ops = { 112static struct dst_ops fake_dst_ops = {
106 .family = AF_INET, 113 .family = AF_INET,
107 .protocol = cpu_to_be16(ETH_P_IP), 114 .protocol = cpu_to_be16(ETH_P_IP),
108 .update_pmtu = fake_update_pmtu, 115 .update_pmtu = fake_update_pmtu,
109 .entries = ATOMIC_INIT(0), 116 .cow_metrics = fake_cow_metrics,
110}; 117};
111 118
112/* 119/*
@@ -116,6 +123,10 @@ static struct dst_ops fake_dst_ops = {
116 * ipt_REJECT needs it. Future netfilter modules might 123 * ipt_REJECT needs it. Future netfilter modules might
117 * require us to fill additional fields. 124 * require us to fill additional fields.
118 */ 125 */
126static const u32 br_dst_default_metrics[RTAX_MAX] = {
127 [RTAX_MTU - 1] = 1500,
128};
129
119void br_netfilter_rtable_init(struct net_bridge *br) 130void br_netfilter_rtable_init(struct net_bridge *br)
120{ 131{
121 struct rtable *rt = &br->fake_rtable; 132 struct rtable *rt = &br->fake_rtable;
@@ -123,24 +134,25 @@ void br_netfilter_rtable_init(struct net_bridge *br)
123 atomic_set(&rt->dst.__refcnt, 1); 134 atomic_set(&rt->dst.__refcnt, 1);
124 rt->dst.dev = br->dev; 135 rt->dst.dev = br->dev;
125 rt->dst.path = &rt->dst; 136 rt->dst.path = &rt->dst;
126 rt->dst.metrics[RTAX_MTU - 1] = 1500; 137 dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
127 rt->dst.flags = DST_NOXFRM; 138 rt->dst.flags = DST_NOXFRM;
128 rt->dst.ops = &fake_dst_ops; 139 rt->dst.ops = &fake_dst_ops;
129} 140}
130 141
131static inline struct rtable *bridge_parent_rtable(const struct net_device *dev) 142static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
132{ 143{
133 if (!br_port_exists(dev)) 144 struct net_bridge_port *port;
134 return NULL; 145
135 return &br_port_get_rcu(dev)->br->fake_rtable; 146 port = br_port_get_rcu(dev);
147 return port ? &port->br->fake_rtable : NULL;
136} 148}
137 149
138static inline struct net_device *bridge_parent(const struct net_device *dev) 150static inline struct net_device *bridge_parent(const struct net_device *dev)
139{ 151{
140 if (!br_port_exists(dev)) 152 struct net_bridge_port *port;
141 return NULL;
142 153
143 return br_port_get_rcu(dev)->br->dev; 154 port = br_port_get_rcu(dev);
155 return port ? port->br->dev : NULL;
144} 156}
145 157
146static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb) 158static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -209,6 +221,70 @@ static inline void nf_bridge_update_protocol(struct sk_buff *skb)
209 skb->protocol = htons(ETH_P_PPP_SES); 221 skb->protocol = htons(ETH_P_PPP_SES);
210} 222}
211 223
224/* When handing a packet over to the IP layer
225 * check whether we have a skb that is in the
226 * expected format
227 */
228
229static int br_parse_ip_options(struct sk_buff *skb)
230{
231 struct ip_options *opt;
232 const struct iphdr *iph;
233 struct net_device *dev = skb->dev;
234 u32 len;
235
236 iph = ip_hdr(skb);
237 opt = &(IPCB(skb)->opt);
238
239 /* Basic sanity checks */
240 if (iph->ihl < 5 || iph->version != 4)
241 goto inhdr_error;
242
243 if (!pskb_may_pull(skb, iph->ihl*4))
244 goto inhdr_error;
245
246 iph = ip_hdr(skb);
247 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
248 goto inhdr_error;
249
250 len = ntohs(iph->tot_len);
251 if (skb->len < len) {
252 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
253 goto drop;
254 } else if (len < (iph->ihl*4))
255 goto inhdr_error;
256
257 if (pskb_trim_rcsum(skb, len)) {
258 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
259 goto drop;
260 }
261
262 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
263 if (iph->ihl == 5)
264 return 0;
265
266 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
267 if (ip_options_compile(dev_net(dev), opt, skb))
268 goto inhdr_error;
269
270 /* Check correct handling of SRR option */
271 if (unlikely(opt->srr)) {
272 struct in_device *in_dev = __in_dev_get_rcu(dev);
273 if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
274 goto drop;
275
276 if (ip_options_rcv_srr(skb))
277 goto drop;
278 }
279
280 return 0;
281
282inhdr_error:
283 IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
284drop:
285 return -1;
286}
287
212/* Fill in the header for fragmented IP packets handled by 288/* Fill in the header for fragmented IP packets handled by
213 * the IPv4 connection tracking code. 289 * the IPv4 connection tracking code.
214 */ 290 */
@@ -344,15 +420,6 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
344 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING; 420 nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
345 if (dnat_took_place(skb)) { 421 if (dnat_took_place(skb)) {
346 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) { 422 if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
347 struct flowi fl = {
348 .nl_u = {
349 .ip4_u = {
350 .daddr = iph->daddr,
351 .saddr = 0,
352 .tos = RT_TOS(iph->tos) },
353 },
354 .proto = 0,
355 };
356 struct in_device *in_dev = __in_dev_get_rcu(dev); 423 struct in_device *in_dev = __in_dev_get_rcu(dev);
357 424
358 /* If err equals -EHOSTUNREACH the error is due to a 425 /* If err equals -EHOSTUNREACH the error is due to a
@@ -365,14 +432,16 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
365 if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev)) 432 if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
366 goto free_skb; 433 goto free_skb;
367 434
368 if (!ip_route_output_key(dev_net(dev), &rt, &fl)) { 435 rt = ip_route_output(dev_net(dev), iph->daddr, 0,
436 RT_TOS(iph->tos), 0);
437 if (!IS_ERR(rt)) {
369 /* - Bridged-and-DNAT'ed traffic doesn't 438 /* - Bridged-and-DNAT'ed traffic doesn't
370 * require ip_forwarding. */ 439 * require ip_forwarding. */
371 if (((struct dst_entry *)rt)->dev == dev) { 440 if (rt->dst.dev == dev) {
372 skb_dst_set(skb, (struct dst_entry *)rt); 441 skb_dst_set(skb, &rt->dst);
373 goto bridged_dnat; 442 goto bridged_dnat;
374 } 443 }
375 dst_release((struct dst_entry *)rt); 444 ip_rt_put(rt);
376 } 445 }
377free_skb: 446free_skb:
378 kfree_skb(skb); 447 kfree_skb(skb);
@@ -495,30 +564,30 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
495 const struct net_device *out, 564 const struct net_device *out,
496 int (*okfn)(struct sk_buff *)) 565 int (*okfn)(struct sk_buff *))
497{ 566{
498 struct ipv6hdr *hdr; 567 const struct ipv6hdr *hdr;
499 u32 pkt_len; 568 u32 pkt_len;
500 569
501 if (skb->len < sizeof(struct ipv6hdr)) 570 if (skb->len < sizeof(struct ipv6hdr))
502 goto inhdr_error; 571 return NF_DROP;
503 572
504 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 573 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
505 goto inhdr_error; 574 return NF_DROP;
506 575
507 hdr = ipv6_hdr(skb); 576 hdr = ipv6_hdr(skb);
508 577
509 if (hdr->version != 6) 578 if (hdr->version != 6)
510 goto inhdr_error; 579 return NF_DROP;
511 580
512 pkt_len = ntohs(hdr->payload_len); 581 pkt_len = ntohs(hdr->payload_len);
513 582
514 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { 583 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
515 if (pkt_len + sizeof(struct ipv6hdr) > skb->len) 584 if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
516 goto inhdr_error; 585 return NF_DROP;
517 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) 586 if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
518 goto inhdr_error; 587 return NF_DROP;
519 } 588 }
520 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb)) 589 if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
521 goto inhdr_error; 590 return NF_DROP;
522 591
523 nf_bridge_put(skb->nf_bridge); 592 nf_bridge_put(skb->nf_bridge);
524 if (!nf_bridge_alloc(skb)) 593 if (!nf_bridge_alloc(skb))
@@ -531,9 +600,6 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
531 br_nf_pre_routing_finish_ipv6); 600 br_nf_pre_routing_finish_ipv6);
532 601
533 return NF_STOLEN; 602 return NF_STOLEN;
534
535inhdr_error:
536 return NF_DROP;
537} 603}
538 604
539/* Direct IPv6 traffic to br_nf_pre_routing_ipv6. 605/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
@@ -549,15 +615,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
549{ 615{
550 struct net_bridge_port *p; 616 struct net_bridge_port *p;
551 struct net_bridge *br; 617 struct net_bridge *br;
552 struct iphdr *iph;
553 __u32 len = nf_bridge_encap_header_len(skb); 618 __u32 len = nf_bridge_encap_header_len(skb);
554 619
555 if (unlikely(!pskb_may_pull(skb, len))) 620 if (unlikely(!pskb_may_pull(skb, len)))
556 goto out; 621 return NF_DROP;
557 622
558 p = br_port_get_rcu(in); 623 p = br_port_get_rcu(in);
559 if (p == NULL) 624 if (p == NULL)
560 goto out; 625 return NF_DROP;
561 br = p->br; 626 br = p->br;
562 627
563 if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) || 628 if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
@@ -578,28 +643,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
578 643
579 nf_bridge_pull_encap_header_rcsum(skb); 644 nf_bridge_pull_encap_header_rcsum(skb);
580 645
581 if (!pskb_may_pull(skb, sizeof(struct iphdr))) 646 if (br_parse_ip_options(skb))
582 goto inhdr_error; 647 return NF_DROP;
583
584 iph = ip_hdr(skb);
585 if (iph->ihl < 5 || iph->version != 4)
586 goto inhdr_error;
587
588 if (!pskb_may_pull(skb, 4 * iph->ihl))
589 goto inhdr_error;
590
591 iph = ip_hdr(skb);
592 if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
593 goto inhdr_error;
594
595 len = ntohs(iph->tot_len);
596 if (skb->len < len || len < 4 * iph->ihl)
597 goto inhdr_error;
598
599 pskb_trim_rcsum(skb, len);
600
601 /* BUG: Should really parse the IP options here. */
602 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
603 648
604 nf_bridge_put(skb->nf_bridge); 649 nf_bridge_put(skb->nf_bridge);
605 if (!nf_bridge_alloc(skb)) 650 if (!nf_bridge_alloc(skb))
@@ -613,11 +658,6 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
613 br_nf_pre_routing_finish); 658 br_nf_pre_routing_finish);
614 659
615 return NF_STOLEN; 660 return NF_STOLEN;
616
617inhdr_error:
618// IP_INC_STATS_BH(IpInHdrErrors);
619out:
620 return NF_DROP;
621} 661}
622 662
623 663
@@ -707,6 +747,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
707 nf_bridge->mask |= BRNF_PKT_TYPE; 747 nf_bridge->mask |= BRNF_PKT_TYPE;
708 } 748 }
709 749
750 if (pf == PF_INET && br_parse_ip_options(skb))
751 return NF_DROP;
752
710 /* The physdev module checks on this */ 753 /* The physdev module checks on this */
711 nf_bridge->mask |= BRNF_BRIDGED; 754 nf_bridge->mask |= BRNF_BRIDGED;
712 nf_bridge->physoutdev = skb->dev; 755 nf_bridge->physoutdev = skb->dev;
@@ -759,14 +802,19 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
759#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE) 802#if defined(CONFIG_NF_CONNTRACK_IPV4) || defined(CONFIG_NF_CONNTRACK_IPV4_MODULE)
760static int br_nf_dev_queue_xmit(struct sk_buff *skb) 803static int br_nf_dev_queue_xmit(struct sk_buff *skb)
761{ 804{
805 int ret;
806
762 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) && 807 if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
763 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu && 808 skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
764 !skb_is_gso(skb)) { 809 !skb_is_gso(skb)) {
765 /* BUG: Should really parse the IP options here. */ 810 if (br_parse_ip_options(skb))
766 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 811 /* Drop invalid packet */
767 return ip_fragment(skb, br_dev_queue_push_xmit); 812 return NF_DROP;
813 ret = ip_fragment(skb, br_dev_queue_push_xmit);
768 } else 814 } else
769 return br_dev_queue_push_xmit(skb); 815 ret = br_dev_queue_push_xmit(skb);
816
817 return ret;
770} 818}
771#else 819#else
772static int br_nf_dev_queue_xmit(struct sk_buff *skb) 820static int br_nf_dev_queue_xmit(struct sk_buff *skb)
@@ -954,15 +1002,22 @@ int __init br_netfilter_init(void)
954{ 1002{
955 int ret; 1003 int ret;
956 1004
957 ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 1005 ret = dst_entries_init(&fake_dst_ops);
958 if (ret < 0) 1006 if (ret < 0)
959 return ret; 1007 return ret;
1008
1009 ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1010 if (ret < 0) {
1011 dst_entries_destroy(&fake_dst_ops);
1012 return ret;
1013 }
960#ifdef CONFIG_SYSCTL 1014#ifdef CONFIG_SYSCTL
961 brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table); 1015 brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
962 if (brnf_sysctl_header == NULL) { 1016 if (brnf_sysctl_header == NULL) {
963 printk(KERN_WARNING 1017 printk(KERN_WARNING
964 "br_netfilter: can't register to sysctl.\n"); 1018 "br_netfilter: can't register to sysctl.\n");
965 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 1019 nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
1020 dst_entries_destroy(&fake_dst_ops);
966 return -ENOMEM; 1021 return -ENOMEM;
967 } 1022 }
968#endif 1023#endif
@@ -976,4 +1031,5 @@ void br_netfilter_fini(void)
976#ifdef CONFIG_SYSCTL 1031#ifdef CONFIG_SYSCTL
977 unregister_sysctl_table(brnf_sysctl_header); 1032 unregister_sysctl_table(brnf_sysctl_header);
978#endif 1033#endif
1034 dst_entries_destroy(&fake_dst_ops);
979} 1035}
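Worth spelling out from the vlan_proto()/IS_VLAN_*() rework at the top of the br_netfilter.c diff: the old macros only matched in-band 802.1Q headers, so frames whose tag had already been stripped by the NIC were never treated as VLAN traffic. A short comment-style sketch of the cases the new helper distinguishes (standard skb conventions, nothing specific to this patch):

/* Accelerated tag: vlan_tx_tag_present(skb) is true, the tag lives in
 * skb metadata, skb->data already points at the inner packet and
 * skb->protocol is the encapsulated protocol (ETH_P_IP, ETH_P_IPV6, ...).
 *
 * In-band tag: skb->protocol is ETH_P_8021Q and the inner protocol has
 * to be read from the header itself via
 * vlan_eth_hdr(skb)->h_vlan_encapsulated_proto.
 *
 * Anything else: vlan_proto() returns 0, so IS_VLAN_IP, IS_VLAN_IPV6
 * and IS_VLAN_ARP all evaluate false.
 */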
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4a6a378c84e3..ffb0dc4cc0e8 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -12,9 +12,11 @@
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/etherdevice.h>
15#include <net/rtnetlink.h> 16#include <net/rtnetlink.h>
16#include <net/net_namespace.h> 17#include <net/net_namespace.h>
17#include <net/sock.h> 18#include <net/sock.h>
19
18#include "br_private.h" 20#include "br_private.h"
19 21
20static inline size_t br_nlmsg_size(void) 22static inline size_t br_nlmsg_size(void)
@@ -118,12 +120,15 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
118 int idx; 120 int idx;
119 121
120 idx = 0; 122 idx = 0;
121 for_each_netdev(net, dev) { 123 rcu_read_lock();
124 for_each_netdev_rcu(net, dev) {
125 struct net_bridge_port *port = br_port_get_rcu(dev);
126
122 /* not a bridge port */ 127 /* not a bridge port */
123 if (!br_port_exists(dev) || idx < cb->args[0]) 128 if (!port || idx < cb->args[0])
124 goto skip; 129 goto skip;
125 130
126 if (br_fill_ifinfo(skb, br_port_get(dev), 131 if (br_fill_ifinfo(skb, port,
127 NETLINK_CB(cb->skb).pid, 132 NETLINK_CB(cb->skb).pid,
128 cb->nlh->nlmsg_seq, RTM_NEWLINK, 133 cb->nlh->nlmsg_seq, RTM_NEWLINK,
129 NLM_F_MULTI) < 0) 134 NLM_F_MULTI) < 0)
@@ -131,7 +136,7 @@ static int br_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
131skip: 136skip:
132 ++idx; 137 ++idx;
133 } 138 }
134 139 rcu_read_unlock();
135 cb->args[0] = idx; 140 cb->args[0] = idx;
136 141
137 return skb->len; 142 return skb->len;
@@ -169,9 +174,9 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
169 if (!dev) 174 if (!dev)
170 return -ENODEV; 175 return -ENODEV;
171 176
172 if (!br_port_exists(dev)) 177 p = br_port_get_rtnl(dev);
178 if (!p)
173 return -EINVAL; 179 return -EINVAL;
174 p = br_port_get(dev);
175 180
176 /* if kernel STP is running, don't allow changes */ 181 /* if kernel STP is running, don't allow changes */
177 if (p->br->stp_enabled == BR_KERNEL_STP) 182 if (p->br->stp_enabled == BR_KERNEL_STP)
@@ -186,20 +191,61 @@ static int br_rtm_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
186 return 0; 191 return 0;
187} 192}
188 193
194static int br_validate(struct nlattr *tb[], struct nlattr *data[])
195{
196 if (tb[IFLA_ADDRESS]) {
197 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
198 return -EINVAL;
199 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
200 return -EADDRNOTAVAIL;
201 }
202
203 return 0;
204}
205
206static struct rtnl_link_ops br_link_ops __read_mostly = {
207 .kind = "bridge",
208 .priv_size = sizeof(struct net_bridge),
209 .setup = br_dev_setup,
210 .validate = br_validate,
211};
189 212
190int __init br_netlink_init(void) 213int __init br_netlink_init(void)
191{ 214{
192 if (__rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo)) 215 int err;
193 return -ENOBUFS;
194 216
195 /* Only the first call to __rtnl_register can fail */ 217 err = rtnl_link_register(&br_link_ops);
196 __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL); 218 if (err < 0)
219 goto err1;
220
221 err = __rtnl_register(PF_BRIDGE, RTM_GETLINK, NULL, br_dump_ifinfo);
222 if (err)
223 goto err2;
224 err = __rtnl_register(PF_BRIDGE, RTM_SETLINK, br_rtm_setlink, NULL);
225 if (err)
226 goto err3;
227 err = __rtnl_register(PF_BRIDGE, RTM_NEWNEIGH, br_fdb_add, NULL);
228 if (err)
229 goto err3;
230 err = __rtnl_register(PF_BRIDGE, RTM_DELNEIGH, br_fdb_delete, NULL);
231 if (err)
232 goto err3;
233 err = __rtnl_register(PF_BRIDGE, RTM_GETNEIGH, NULL, br_fdb_dump);
234 if (err)
235 goto err3;
197 236
198 return 0; 237 return 0;
238
239err3:
240 rtnl_unregister_all(PF_BRIDGE);
241err2:
242 rtnl_link_unregister(&br_link_ops);
243err1:
244 return err;
199} 245}
200 246
201void __exit br_netlink_fini(void) 247void __exit br_netlink_fini(void)
202{ 248{
249 rtnl_link_unregister(&br_link_ops);
203 rtnl_unregister_all(PF_BRIDGE); 250 rtnl_unregister_all(PF_BRIDGE);
204} 251}
205
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 404d4e14c6a7..6545ee9591d1 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,15 +32,21 @@ struct notifier_block br_device_notifier = {
32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr) 32static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
33{ 33{
34 struct net_device *dev = ptr; 34 struct net_device *dev = ptr;
35 struct net_bridge_port *p = br_port_get(dev); 35 struct net_bridge_port *p;
36 struct net_bridge *br; 36 struct net_bridge *br;
37 int err; 37 int err;
38 38
39 /* register of bridge completed, add sysfs entries */
40 if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
41 br_sysfs_addbr(dev);
42 return NOTIFY_DONE;
43 }
44
39 /* not a port of a bridge */ 45 /* not a port of a bridge */
40 if (!br_port_exists(dev)) 46 p = br_port_get_rtnl(dev);
47 if (!p)
41 return NOTIFY_DONE; 48 return NOTIFY_DONE;
42 49
43 p = br_port_get(dev);
44 br = p->br; 50 br = p->br;
45 51
46 switch (event) { 52 switch (event) {
@@ -60,10 +66,7 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
60 break; 66 break;
61 67
62 case NETDEV_FEAT_CHANGE: 68 case NETDEV_FEAT_CHANGE:
63 spin_lock_bh(&br->lock); 69 netdev_update_features(br->dev);
64 if (netif_running(br->dev))
65 br_features_recompute(br);
66 spin_unlock_bh(&br->lock);
67 break; 70 break;
68 71
69 case NETDEV_DOWN: 72 case NETDEV_DOWN:
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 75c90edaf7db..54578f274d85 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -64,7 +64,8 @@ struct net_bridge_fdb_entry
64 struct net_bridge_port *dst; 64 struct net_bridge_port *dst;
65 65
66 struct rcu_head rcu; 66 struct rcu_head rcu;
67 unsigned long ageing_timer; 67 unsigned long updated;
68 unsigned long used;
68 mac_addr addr; 69 mac_addr addr;
69 unsigned char is_local; 70 unsigned char is_local;
70 unsigned char is_static; 71 unsigned char is_static;
@@ -72,7 +73,7 @@ struct net_bridge_fdb_entry
72 73
73struct net_bridge_port_group { 74struct net_bridge_port_group {
74 struct net_bridge_port *port; 75 struct net_bridge_port *port;
75 struct net_bridge_port_group *next; 76 struct net_bridge_port_group __rcu *next;
76 struct hlist_node mglist; 77 struct hlist_node mglist;
77 struct rcu_head rcu; 78 struct rcu_head rcu;
78 struct timer_list timer; 79 struct timer_list timer;
@@ -84,13 +85,13 @@ struct net_bridge_port_group {
84struct net_bridge_mdb_entry 85struct net_bridge_mdb_entry
85{ 86{
86 struct hlist_node hlist[2]; 87 struct hlist_node hlist[2];
87 struct hlist_node mglist;
88 struct net_bridge *br; 88 struct net_bridge *br;
89 struct net_bridge_port_group *ports; 89 struct net_bridge_port_group __rcu *ports;
90 struct rcu_head rcu; 90 struct rcu_head rcu;
91 struct timer_list timer; 91 struct timer_list timer;
92 struct timer_list query_timer; 92 struct timer_list query_timer;
93 struct br_ip addr; 93 struct br_ip addr;
94 bool mglist;
94 u32 queries_sent; 95 u32 queries_sent;
95}; 96};
96 97
@@ -151,11 +152,20 @@ struct net_bridge_port
151#endif 152#endif
152}; 153};
153 154
154#define br_port_get_rcu(dev) \
155 ((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
156#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
157#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT) 155#define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
158 156
157static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
158{
159 struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
160 return br_port_exists(dev) ? port : NULL;
161}
162
163static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
164{
165 return br_port_exists(dev) ?
166 rtnl_dereference(dev->rx_handler_data) : NULL;
167}
168
159struct br_cpu_netstats { 169struct br_cpu_netstats {
160 u64 rx_packets; 170 u64 rx_packets;
161 u64 rx_bytes; 171 u64 rx_bytes;
@@ -173,7 +183,6 @@ struct net_bridge
173 struct br_cpu_netstats __percpu *stats; 183 struct br_cpu_netstats __percpu *stats;
174 spinlock_t hash_lock; 184 spinlock_t hash_lock;
175 struct hlist_head hash[BR_HASH_SIZE]; 185 struct hlist_head hash[BR_HASH_SIZE];
176 unsigned long feature_mask;
177#ifdef CONFIG_BRIDGE_NETFILTER 186#ifdef CONFIG_BRIDGE_NETFILTER
178 struct rtable fake_rtable; 187 struct rtable fake_rtable;
179 bool nf_call_iptables; 188 bool nf_call_iptables;
@@ -227,9 +236,8 @@ struct net_bridge
227 unsigned long multicast_startup_query_interval; 236 unsigned long multicast_startup_query_interval;
228 237
229 spinlock_t multicast_lock; 238 spinlock_t multicast_lock;
230 struct net_bridge_mdb_htable *mdb; 239 struct net_bridge_mdb_htable __rcu *mdb;
231 struct hlist_head router_list; 240 struct hlist_head router_list;
232 struct hlist_head mglist;
233 241
234 struct timer_list multicast_router_timer; 242 struct timer_list multicast_router_timer;
235 struct timer_list multicast_querier_timer; 243 struct timer_list multicast_querier_timer;
@@ -345,6 +353,9 @@ extern int br_fdb_insert(struct net_bridge *br,
345extern void br_fdb_update(struct net_bridge *br, 353extern void br_fdb_update(struct net_bridge *br,
346 struct net_bridge_port *source, 354 struct net_bridge_port *source,
347 const unsigned char *addr); 355 const unsigned char *addr);
356extern int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb);
357extern int br_fdb_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
358extern int br_fdb_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg);
348 359
349/* br_forward.c */ 360/* br_forward.c */
350extern void br_deliver(const struct net_bridge_port *to, 361extern void br_deliver(const struct net_bridge_port *to,
@@ -367,11 +378,11 @@ extern int br_add_if(struct net_bridge *br,
367extern int br_del_if(struct net_bridge *br, 378extern int br_del_if(struct net_bridge *br,
368 struct net_device *dev); 379 struct net_device *dev);
369extern int br_min_mtu(const struct net_bridge *br); 380extern int br_min_mtu(const struct net_bridge *br);
370extern void br_features_recompute(struct net_bridge *br); 381extern u32 br_features_recompute(struct net_bridge *br, u32 features);
371 382
372/* br_input.c */ 383/* br_input.c */
373extern int br_handle_frame_finish(struct sk_buff *skb); 384extern int br_handle_frame_finish(struct sk_buff *skb);
374extern struct sk_buff *br_handle_frame(struct sk_buff *skb); 385extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
375 386
376/* br_ioctl.c */ 387/* br_ioctl.c */
377extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 388extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
@@ -483,20 +494,25 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br,
483extern void br_init_port(struct net_bridge_port *p); 494extern void br_init_port(struct net_bridge_port *p);
484extern void br_become_designated_port(struct net_bridge_port *p); 495extern void br_become_designated_port(struct net_bridge_port *p);
485 496
497extern int br_set_forward_delay(struct net_bridge *br, unsigned long x);
498extern int br_set_hello_time(struct net_bridge *br, unsigned long x);
499extern int br_set_max_age(struct net_bridge *br, unsigned long x);
500
501
486/* br_stp_if.c */ 502/* br_stp_if.c */
487extern void br_stp_enable_bridge(struct net_bridge *br); 503extern void br_stp_enable_bridge(struct net_bridge *br);
488extern void br_stp_disable_bridge(struct net_bridge *br); 504extern void br_stp_disable_bridge(struct net_bridge *br);
489extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val); 505extern void br_stp_set_enabled(struct net_bridge *br, unsigned long val);
490extern void br_stp_enable_port(struct net_bridge_port *p); 506extern void br_stp_enable_port(struct net_bridge_port *p);
491extern void br_stp_disable_port(struct net_bridge_port *p); 507extern void br_stp_disable_port(struct net_bridge_port *p);
492extern void br_stp_recalculate_bridge_id(struct net_bridge *br); 508extern bool br_stp_recalculate_bridge_id(struct net_bridge *br);
493extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a); 509extern void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *a);
494extern void br_stp_set_bridge_priority(struct net_bridge *br, 510extern void br_stp_set_bridge_priority(struct net_bridge *br,
495 u16 newprio); 511 u16 newprio);
496extern void br_stp_set_port_priority(struct net_bridge_port *p, 512extern int br_stp_set_port_priority(struct net_bridge_port *p,
497 u8 newprio); 513 unsigned long newprio);
498extern void br_stp_set_path_cost(struct net_bridge_port *p, 514extern int br_stp_set_path_cost(struct net_bridge_port *p,
499 u32 path_cost); 515 unsigned long path_cost);
500extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id); 516extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
501 517
502/* br_stp_bpdu.c */ 518/* br_stp_bpdu.c */
diff --git a/net/bridge/br_private_stp.h b/net/bridge/br_private_stp.h
index 8b650f7fbfa0..642ef47a867e 100644
--- a/net/bridge/br_private_stp.h
+++ b/net/bridge/br_private_stp.h
@@ -16,6 +16,19 @@
16#define BPDU_TYPE_CONFIG 0 16#define BPDU_TYPE_CONFIG 0
17#define BPDU_TYPE_TCN 0x80 17#define BPDU_TYPE_TCN 0x80
18 18
19/* IEEE 802.1D-1998 timer values */
20#define BR_MIN_HELLO_TIME (1*HZ)
21#define BR_MAX_HELLO_TIME (10*HZ)
22
23#define BR_MIN_FORWARD_DELAY (2*HZ)
24#define BR_MAX_FORWARD_DELAY (30*HZ)
25
26#define BR_MIN_MAX_AGE (6*HZ)
27#define BR_MAX_MAX_AGE (40*HZ)
28
29#define BR_MIN_PATH_COST 1
30#define BR_MAX_PATH_COST 65535
31
19struct br_config_bpdu 32struct br_config_bpdu
20{ 33{
21 unsigned topology_change:1; 34 unsigned topology_change:1;
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 57186d84d2bd..bb4383e84de9 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -375,12 +375,12 @@ static void br_make_forwarding(struct net_bridge_port *p)
375 if (p->state != BR_STATE_BLOCKING) 375 if (p->state != BR_STATE_BLOCKING)
376 return; 376 return;
377 377
378 if (br->forward_delay == 0) { 378 if (br->stp_enabled == BR_NO_STP || br->forward_delay == 0) {
379 p->state = BR_STATE_FORWARDING; 379 p->state = BR_STATE_FORWARDING;
380 br_topology_change_detection(br); 380 br_topology_change_detection(br);
381 del_timer(&p->forward_delay_timer); 381 del_timer(&p->forward_delay_timer);
382 } 382 }
383 else if (p->br->stp_enabled == BR_KERNEL_STP) 383 else if (br->stp_enabled == BR_KERNEL_STP)
384 p->state = BR_STATE_LISTENING; 384 p->state = BR_STATE_LISTENING;
385 else 385 else
386 p->state = BR_STATE_LEARNING; 386 p->state = BR_STATE_LEARNING;
@@ -397,28 +397,37 @@ static void br_make_forwarding(struct net_bridge_port *p)
397void br_port_state_selection(struct net_bridge *br) 397void br_port_state_selection(struct net_bridge *br)
398{ 398{
399 struct net_bridge_port *p; 399 struct net_bridge_port *p;
400 unsigned int liveports = 0;
400 401
401 /* Don't change port states if userspace is handling STP */ 402 /* Don't change port states if userspace is handling STP */
402 if (br->stp_enabled == BR_USER_STP) 403 if (br->stp_enabled == BR_USER_STP)
403 return; 404 return;
404 405
405 list_for_each_entry(p, &br->port_list, list) { 406 list_for_each_entry(p, &br->port_list, list) {
406 if (p->state != BR_STATE_DISABLED) { 407 if (p->state == BR_STATE_DISABLED)
407 if (p->port_no == br->root_port) { 408 continue;
408 p->config_pending = 0; 409
409 p->topology_change_ack = 0; 410 if (p->port_no == br->root_port) {
410 br_make_forwarding(p); 411 p->config_pending = 0;
411 } else if (br_is_designated_port(p)) { 412 p->topology_change_ack = 0;
412 del_timer(&p->message_age_timer); 413 br_make_forwarding(p);
413 br_make_forwarding(p); 414 } else if (br_is_designated_port(p)) {
414 } else { 415 del_timer(&p->message_age_timer);
415 p->config_pending = 0; 416 br_make_forwarding(p);
416 p->topology_change_ack = 0; 417 } else {
417 br_make_blocking(p); 418 p->config_pending = 0;
418 } 419 p->topology_change_ack = 0;
420 br_make_blocking(p);
419 } 421 }
420 422
423 if (p->state == BR_STATE_FORWARDING)
424 ++liveports;
421 } 425 }
426
427 if (liveports == 0)
428 netif_carrier_off(br->dev);
429 else
430 netif_carrier_on(br->dev);
422} 431}
423 432
424/* called under bridge lock */ 433/* called under bridge lock */
@@ -475,3 +484,51 @@ void br_received_tcn_bpdu(struct net_bridge_port *p)
475 br_topology_change_acknowledge(p); 484 br_topology_change_acknowledge(p);
476 } 485 }
477} 486}
487
488/* Change bridge STP parameter */
489int br_set_hello_time(struct net_bridge *br, unsigned long val)
490{
491 unsigned long t = clock_t_to_jiffies(val);
492
493 if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
494 return -ERANGE;
495
496 spin_lock_bh(&br->lock);
497 br->bridge_hello_time = t;
498 if (br_is_root_bridge(br))
499 br->hello_time = br->bridge_hello_time;
500 spin_unlock_bh(&br->lock);
501 return 0;
502}
503
504int br_set_max_age(struct net_bridge *br, unsigned long val)
505{
506 unsigned long t = clock_t_to_jiffies(val);
507
508 if (t < BR_MIN_MAX_AGE || t > BR_MAX_MAX_AGE)
509 return -ERANGE;
510
511 spin_lock_bh(&br->lock);
512 br->bridge_max_age = t;
513 if (br_is_root_bridge(br))
514 br->max_age = br->bridge_max_age;
515 spin_unlock_bh(&br->lock);
516 return 0;
517
518}
519
520int br_set_forward_delay(struct net_bridge *br, unsigned long val)
521{
522 unsigned long t = clock_t_to_jiffies(val);
523
524 if (br->stp_enabled != BR_NO_STP &&
525 (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY))
526 return -ERANGE;
527
528 spin_lock_bh(&br->lock);
529 br->bridge_forward_delay = t;
530 if (br_is_root_bridge(br))
531 br->forward_delay = br->bridge_forward_delay;
532 spin_unlock_bh(&br->lock);
533 return 0;
534}
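The three br_set_*() helpers added above take the value exactly as userspace supplies it, in clock_t ticks, convert it with clock_t_to_jiffies() and only then compare against the 802.1D limits from br_private_stp.h. A small worked example, assuming the usual USER_HZ of 100 (an assumption about the platform, not something this diff states):

	/* sketch: values arrive from userspace in clock_t (1/100 s assumed) */
	err = br_set_forward_delay(br, 1500);	/* 15.00 s -> inside [2*HZ, 30*HZ], accepted */
	err = br_set_forward_delay(br, 100);	/* 1.00 s -> -ERANGE once STP is enabled;
						 * with stp_enabled == BR_NO_STP the range
						 * check is skipped, as in the hunk above */

Centralising the checks here is what lets the sysfs wrappers later in this listing shrink to one-line calls into these helpers.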
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 35cf27087b56..289646ec9b7b 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -50,6 +50,8 @@ static void br_send_bpdu(struct net_bridge_port *p,
50 50
51 llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr); 51 llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
52 52
53 skb_reset_mac_header(skb);
54
53 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 55 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
54 dev_queue_xmit); 56 dev_queue_xmit);
55} 57}
@@ -141,10 +143,6 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
141 struct net_bridge *br; 143 struct net_bridge *br;
142 const unsigned char *buf; 144 const unsigned char *buf;
143 145
144 if (!br_port_exists(dev))
145 goto err;
146 p = br_port_get_rcu(dev);
147
148 if (!pskb_may_pull(skb, 4)) 146 if (!pskb_may_pull(skb, 4))
149 goto err; 147 goto err;
150 148
@@ -153,6 +151,10 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
153 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0) 151 if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
154 goto err; 152 goto err;
155 153
154 p = br_port_get_rcu(dev);
155 if (!p)
156 goto err;
157
156 br = p->br; 158 br = p->br;
157 spin_lock(&br->lock); 159 spin_lock(&br->lock);
158 160
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 1d8826914cbf..6f615b8192f4 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -20,7 +20,7 @@
20 20
21 21
22/* Port id is composed of priority and port number. 22/* Port id is composed of priority and port number.
23 * NB: least significant bits of priority are dropped to 23 * NB: some bits of priority are dropped to
24 * make room for more ports. 24 * make room for more ports.
25 */ 25 */
26static inline port_id br_make_port_id(__u8 priority, __u16 port_no) 26static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
@@ -29,6 +29,8 @@ static inline port_id br_make_port_id(__u8 priority, __u16 port_no)
29 | (port_no & ((1<<BR_PORT_BITS)-1)); 29 | (port_no & ((1<<BR_PORT_BITS)-1));
30} 30}
31 31
32#define BR_MAX_PORT_PRIORITY ((u16)~0 >> BR_PORT_BITS)
33
32/* called under bridge lock */ 34/* called under bridge lock */
33void br_init_port(struct net_bridge_port *p) 35void br_init_port(struct net_bridge_port *p)
34{ 36{
@@ -145,7 +147,7 @@ static void br_stp_stop(struct net_bridge *br)
145 char *envp[] = { NULL }; 147 char *envp[] = { NULL };
146 148
147 if (br->stp_enabled == BR_USER_STP) { 149 if (br->stp_enabled == BR_USER_STP) {
148 r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); 150 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
149 br_info(br, "userspace STP stopped, return code %d\n", r); 151 br_info(br, "userspace STP stopped, return code %d\n", r);
150 152
151 /* To start timers on any ports left in blocking */ 153 /* To start timers on any ports left in blocking */
@@ -204,7 +206,7 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
204static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1]; 206static const unsigned short br_mac_zero_aligned[ETH_ALEN >> 1];
205 207
206/* called under bridge lock */ 208/* called under bridge lock */
207void br_stp_recalculate_bridge_id(struct net_bridge *br) 209bool br_stp_recalculate_bridge_id(struct net_bridge *br)
208{ 210{
209 const unsigned char *br_mac_zero = 211 const unsigned char *br_mac_zero =
210 (const unsigned char *)br_mac_zero_aligned; 212 (const unsigned char *)br_mac_zero_aligned;
@@ -213,7 +215,7 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br)
213 215
214 /* user has chosen a value so keep it */ 216 /* user has chosen a value so keep it */
215 if (br->flags & BR_SET_MAC_ADDR) 217 if (br->flags & BR_SET_MAC_ADDR)
216 return; 218 return false;
217 219
218 list_for_each_entry(p, &br->port_list, list) { 220 list_for_each_entry(p, &br->port_list, list) {
219 if (addr == br_mac_zero || 221 if (addr == br_mac_zero ||
@@ -222,8 +224,11 @@ void br_stp_recalculate_bridge_id(struct net_bridge *br)
222 224
223 } 225 }
224 226
225 if (compare_ether_addr(br->bridge_id.addr, addr)) 227 if (compare_ether_addr(br->bridge_id.addr, addr) == 0)
226 br_stp_change_bridge_id(br, addr); 228 return false; /* no change */
229
230 br_stp_change_bridge_id(br, addr);
231 return true;
227} 232}
228 233
229/* called under bridge lock */ 234/* called under bridge lock */
@@ -252,10 +257,14 @@ void br_stp_set_bridge_priority(struct net_bridge *br, u16 newprio)
252} 257}
253 258
254/* called under bridge lock */ 259/* called under bridge lock */
255void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio) 260int br_stp_set_port_priority(struct net_bridge_port *p, unsigned long newprio)
256{ 261{
257 port_id new_port_id = br_make_port_id(newprio, p->port_no); 262 port_id new_port_id;
263
264 if (newprio > BR_MAX_PORT_PRIORITY)
265 return -ERANGE;
258 266
267 new_port_id = br_make_port_id(newprio, p->port_no);
259 if (br_is_designated_port(p)) 268 if (br_is_designated_port(p))
260 p->designated_port = new_port_id; 269 p->designated_port = new_port_id;
261 270
@@ -266,14 +275,21 @@ void br_stp_set_port_priority(struct net_bridge_port *p, u8 newprio)
266 br_become_designated_port(p); 275 br_become_designated_port(p);
267 br_port_state_selection(p->br); 276 br_port_state_selection(p->br);
268 } 277 }
278
279 return 0;
269} 280}
270 281
271/* called under bridge lock */ 282/* called under bridge lock */
272void br_stp_set_path_cost(struct net_bridge_port *p, u32 path_cost) 283int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)
273{ 284{
285 if (path_cost < BR_MIN_PATH_COST ||
286 path_cost > BR_MAX_PATH_COST)
287 return -ERANGE;
288
274 p->path_cost = path_cost; 289 p->path_cost = path_cost;
275 br_configuration_update(p->br); 290 br_configuration_update(p->br);
276 br_port_state_selection(p->br); 291 br_port_state_selection(p->br);
292 return 0;
277} 293}
278 294
279ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id) 295ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id)
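The port-priority and path-cost setters above now return -ERANGE themselves instead of relying on the sysfs wrapper to pre-check the value. For the priority, the admitted range is unchanged; checking the arithmetic with BR_PORT_BITS assumed to be 10 (its value in br_private.h, which is not part of this listing):

/* old sysfs check:  v >= (1 << (16 - 10))            -> rejects 64 and up
 * new check:        newprio > ((u16)~0 >> 10) == 63  -> rejects 64 and up
 *
 * Valid port priorities stay 0..63: the top 6 bits of the 16-bit port_id
 * carry the priority, the low 10 bits the port number (br_make_port_id).
 */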
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7b22456023c5..3e965140051e 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -94,6 +94,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
94 p->state = BR_STATE_FORWARDING; 94 p->state = BR_STATE_FORWARDING;
95 if (br_is_designated_for_some_port(br)) 95 if (br_is_designated_for_some_port(br))
96 br_topology_change_detection(br); 96 br_topology_change_detection(br);
97 netif_carrier_on(br->dev);
97 } 98 }
98 br_log_state(p); 99 br_log_state(p);
99 spin_unlock(&br->lock); 100 spin_unlock(&br->lock);
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 5c1e5559ebba..68b893ea8c3a 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -43,9 +43,7 @@ static ssize_t store_bridge_parm(struct device *d,
43 if (endp == buf) 43 if (endp == buf)
44 return -EINVAL; 44 return -EINVAL;
45 45
46 spin_lock_bh(&br->lock);
47 err = (*set)(br, val); 46 err = (*set)(br, val);
48 spin_unlock_bh(&br->lock);
49 return err ? err : len; 47 return err ? err : len;
50} 48}
51 49
@@ -57,20 +55,11 @@ static ssize_t show_forward_delay(struct device *d,
57 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay)); 55 return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
58} 56}
59 57
60static int set_forward_delay(struct net_bridge *br, unsigned long val)
61{
62 unsigned long delay = clock_t_to_jiffies(val);
63 br->forward_delay = delay;
64 if (br_is_root_bridge(br))
65 br->bridge_forward_delay = delay;
66 return 0;
67}
68
69static ssize_t store_forward_delay(struct device *d, 58static ssize_t store_forward_delay(struct device *d,
70 struct device_attribute *attr, 59 struct device_attribute *attr,
71 const char *buf, size_t len) 60 const char *buf, size_t len)
72{ 61{
73 return store_bridge_parm(d, buf, len, set_forward_delay); 62 return store_bridge_parm(d, buf, len, br_set_forward_delay);
74} 63}
75static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR, 64static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
76 show_forward_delay, store_forward_delay); 65 show_forward_delay, store_forward_delay);
@@ -82,24 +71,11 @@ static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
82 jiffies_to_clock_t(to_bridge(d)->hello_time)); 71 jiffies_to_clock_t(to_bridge(d)->hello_time));
83} 72}
84 73
85static int set_hello_time(struct net_bridge *br, unsigned long val)
86{
87 unsigned long t = clock_t_to_jiffies(val);
88
89 if (t < HZ)
90 return -EINVAL;
91
92 br->hello_time = t;
93 if (br_is_root_bridge(br))
94 br->bridge_hello_time = t;
95 return 0;
96}
97
98static ssize_t store_hello_time(struct device *d, 74static ssize_t store_hello_time(struct device *d,
99 struct device_attribute *attr, const char *buf, 75 struct device_attribute *attr, const char *buf,
100 size_t len) 76 size_t len)
101{ 77{
102 return store_bridge_parm(d, buf, len, set_hello_time); 78 return store_bridge_parm(d, buf, len, br_set_hello_time);
103} 79}
104static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time, 80static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
105 store_hello_time); 81 store_hello_time);
@@ -111,19 +87,10 @@ static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
111 jiffies_to_clock_t(to_bridge(d)->max_age)); 87 jiffies_to_clock_t(to_bridge(d)->max_age));
112} 88}
113 89
114static int set_max_age(struct net_bridge *br, unsigned long val)
115{
116 unsigned long t = clock_t_to_jiffies(val);
117 br->max_age = t;
118 if (br_is_root_bridge(br))
119 br->bridge_max_age = t;
120 return 0;
121}
122
123static ssize_t store_max_age(struct device *d, struct device_attribute *attr, 90static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
124 const char *buf, size_t len) 91 const char *buf, size_t len)
125{ 92{
126 return store_bridge_parm(d, buf, len, set_max_age); 93 return store_bridge_parm(d, buf, len, br_set_max_age);
127} 94}
128static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age); 95static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
129 96
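With the spin_lock_bh()/spin_unlock_bh() pair removed from store_bridge_parm(), the per-parameter setters it dispatches to are expected to handle their own locking and validation; the sysfs files now simply forward to the shared br_set_forward_delay()/br_set_hello_time()/br_set_max_age() helpers. Their bodies are not shown in this diff; the following is only a minimal sketch of what such a helper might look like, mirroring the removed sysfs-local setter with the lock moved inside (BR_MIN_HELLO_TIME/BR_MAX_HELLO_TIME are assumed bound names, not taken from this patch).

int br_set_hello_time(struct net_bridge *br, unsigned long val)
{
	unsigned long t = clock_t_to_jiffies(val);

	/* reject values STP cannot work with */
	if (t < BR_MIN_HELLO_TIME || t > BR_MAX_HELLO_TIME)
		return -ERANGE;

	spin_lock_bh(&br->lock);
	br->hello_time = t;
	if (br_is_root_bridge(br))
		br->bridge_hello_time = t;
	spin_unlock_bh(&br->lock);
	return 0;
}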
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index fd5799c9bc8d..6229b62749e8 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -23,7 +23,7 @@
23struct brport_attribute { 23struct brport_attribute {
24 struct attribute attr; 24 struct attribute attr;
25 ssize_t (*show)(struct net_bridge_port *, char *); 25 ssize_t (*show)(struct net_bridge_port *, char *);
26 ssize_t (*store)(struct net_bridge_port *, unsigned long); 26 int (*store)(struct net_bridge_port *, unsigned long);
27}; 27};
28 28
29#define BRPORT_ATTR(_name,_mode,_show,_store) \ 29#define BRPORT_ATTR(_name,_mode,_show,_store) \
@@ -38,27 +38,17 @@ static ssize_t show_path_cost(struct net_bridge_port *p, char *buf)
38{ 38{
39 return sprintf(buf, "%d\n", p->path_cost); 39 return sprintf(buf, "%d\n", p->path_cost);
40} 40}
41static ssize_t store_path_cost(struct net_bridge_port *p, unsigned long v) 41
42{
43 br_stp_set_path_cost(p, v);
44 return 0;
45}
46static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR, 42static BRPORT_ATTR(path_cost, S_IRUGO | S_IWUSR,
47 show_path_cost, store_path_cost); 43 show_path_cost, br_stp_set_path_cost);
48 44
49static ssize_t show_priority(struct net_bridge_port *p, char *buf) 45static ssize_t show_priority(struct net_bridge_port *p, char *buf)
50{ 46{
51 return sprintf(buf, "%d\n", p->priority); 47 return sprintf(buf, "%d\n", p->priority);
52} 48}
53static ssize_t store_priority(struct net_bridge_port *p, unsigned long v) 49
54{
55 if (v >= (1<<(16-BR_PORT_BITS)))
56 return -ERANGE;
57 br_stp_set_port_priority(p, v);
58 return 0;
59}
60static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR, 50static BRPORT_ATTR(priority, S_IRUGO | S_IWUSR,
61 show_priority, store_priority); 51 show_priority, br_stp_set_port_priority);
62 52
63static ssize_t show_designated_root(struct net_bridge_port *p, char *buf) 53static ssize_t show_designated_root(struct net_bridge_port *p, char *buf)
64{ 54{
@@ -136,7 +126,7 @@ static ssize_t show_hold_timer(struct net_bridge_port *p,
136} 126}
137static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL); 127static BRPORT_ATTR(hold_timer, S_IRUGO, show_hold_timer, NULL);
138 128
139static ssize_t store_flush(struct net_bridge_port *p, unsigned long v) 129static int store_flush(struct net_bridge_port *p, unsigned long v)
140{ 130{
141 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry 131 br_fdb_delete_by_port(p->br, p, 0); // Don't delete local entry
142 return 0; 132 return 0;
@@ -148,7 +138,7 @@ static ssize_t show_hairpin_mode(struct net_bridge_port *p, char *buf)
148 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0; 138 int hairpin_mode = (p->flags & BR_HAIRPIN_MODE) ? 1 : 0;
149 return sprintf(buf, "%d\n", hairpin_mode); 139 return sprintf(buf, "%d\n", hairpin_mode);
150} 140}
151static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v) 141static int store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
152{ 142{
153 if (v) 143 if (v)
154 p->flags |= BR_HAIRPIN_MODE; 144 p->flags |= BR_HAIRPIN_MODE;
@@ -165,7 +155,7 @@ static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
165 return sprintf(buf, "%d\n", p->multicast_router); 155 return sprintf(buf, "%d\n", p->multicast_router);
166} 156}
167 157
168static ssize_t store_multicast_router(struct net_bridge_port *p, 158static int store_multicast_router(struct net_bridge_port *p,
169 unsigned long v) 159 unsigned long v)
170{ 160{
171 return br_multicast_set_port_router(p, v); 161 return br_multicast_set_port_router(p, v);
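The brport_attribute ->store callback now returns int instead of ssize_t, which is what allows br_stp_set_path_cost() and br_stp_set_port_priority() to be wired in directly as store handlers. For context, here is a simplified sketch of the generic dispatch those callbacks sit behind; to_brport_attr()/to_brport() and the rtnl handling of the real brport_store() are assumed and not part of this diff.

static ssize_t brport_store(struct kobject *kobj, struct attribute *attr,
			    const char *buf, size_t count)
{
	struct brport_attribute *brport_attr = to_brport_attr(attr);
	struct net_bridge_port *p = to_brport(kobj);
	unsigned long val;
	char *endp;
	int err;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf || !brport_attr->store)
		return -EINVAL;

	/* serialize against STP and other configuration changes */
	spin_lock_bh(&p->br->lock);
	err = brport_attr->store(p, val);
	spin_unlock_bh(&p->br->lock);

	return err ? err : count;
}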
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 50a46afc2bcc..2ed0056a39a8 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -22,9 +22,15 @@
22#include <linux/netfilter_bridge/ebtables.h> 22#include <linux/netfilter_bridge/ebtables.h>
23#include <linux/netfilter_bridge/ebt_ip6.h> 23#include <linux/netfilter_bridge/ebt_ip6.h>
24 24
25struct tcpudphdr { 25union pkthdr {
26 __be16 src; 26 struct {
27 __be16 dst; 27 __be16 src;
28 __be16 dst;
29 } tcpudphdr;
30 struct {
31 u8 type;
32 u8 code;
33 } icmphdr;
28}; 34};
29 35
30static bool 36static bool
@@ -33,8 +39,8 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
33 const struct ebt_ip6_info *info = par->matchinfo; 39 const struct ebt_ip6_info *info = par->matchinfo;
34 const struct ipv6hdr *ih6; 40 const struct ipv6hdr *ih6;
35 struct ipv6hdr _ip6h; 41 struct ipv6hdr _ip6h;
36 const struct tcpudphdr *pptr; 42 const union pkthdr *pptr;
37 struct tcpudphdr _ports; 43 union pkthdr _pkthdr;
38 44
39 ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h); 45 ih6 = skb_header_pointer(skb, 0, sizeof(_ip6h), &_ip6h);
40 if (ih6 == NULL) 46 if (ih6 == NULL)
@@ -56,26 +62,34 @@ ebt_ip6_mt(const struct sk_buff *skb, struct xt_action_param *par)
56 return false; 62 return false;
57 if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO)) 63 if (FWINV(info->protocol != nexthdr, EBT_IP6_PROTO))
58 return false; 64 return false;
59 if (!(info->bitmask & EBT_IP6_DPORT) && 65 if (!(info->bitmask & ( EBT_IP6_DPORT |
60 !(info->bitmask & EBT_IP6_SPORT)) 66 EBT_IP6_SPORT | EBT_IP6_ICMP6)))
61 return true; 67 return true;
62 pptr = skb_header_pointer(skb, offset_ph, sizeof(_ports), 68
63 &_ports); 69 /* min icmpv6 headersize is 4, so sizeof(_pkthdr) is ok. */
70 pptr = skb_header_pointer(skb, offset_ph, sizeof(_pkthdr),
71 &_pkthdr);
64 if (pptr == NULL) 72 if (pptr == NULL)
65 return false; 73 return false;
66 if (info->bitmask & EBT_IP6_DPORT) { 74 if (info->bitmask & EBT_IP6_DPORT) {
67 u32 dst = ntohs(pptr->dst); 75 u16 dst = ntohs(pptr->tcpudphdr.dst);
68 if (FWINV(dst < info->dport[0] || 76 if (FWINV(dst < info->dport[0] ||
69 dst > info->dport[1], EBT_IP6_DPORT)) 77 dst > info->dport[1], EBT_IP6_DPORT))
70 return false; 78 return false;
71 } 79 }
72 if (info->bitmask & EBT_IP6_SPORT) { 80 if (info->bitmask & EBT_IP6_SPORT) {
73 u32 src = ntohs(pptr->src); 81 u16 src = ntohs(pptr->tcpudphdr.src);
74 if (FWINV(src < info->sport[0] || 82 if (FWINV(src < info->sport[0] ||
75 src > info->sport[1], EBT_IP6_SPORT)) 83 src > info->sport[1], EBT_IP6_SPORT))
76 return false; 84 return false;
77 } 85 }
78 return true; 86 if ((info->bitmask & EBT_IP6_ICMP6) &&
87 FWINV(pptr->icmphdr.type < info->icmpv6_type[0] ||
88 pptr->icmphdr.type > info->icmpv6_type[1] ||
89 pptr->icmphdr.code < info->icmpv6_code[0] ||
90 pptr->icmphdr.code > info->icmpv6_code[1],
91 EBT_IP6_ICMP6))
92 return false;
79 } 93 }
80 return true; 94 return true;
81} 95}
@@ -103,6 +117,14 @@ static int ebt_ip6_mt_check(const struct xt_mtchk_param *par)
103 return -EINVAL; 117 return -EINVAL;
104 if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1]) 118 if (info->bitmask & EBT_IP6_SPORT && info->sport[0] > info->sport[1])
105 return -EINVAL; 119 return -EINVAL;
120 if (info->bitmask & EBT_IP6_ICMP6) {
121 if ((info->invflags & EBT_IP6_PROTO) ||
122 info->protocol != IPPROTO_ICMPV6)
123 return -EINVAL;
124 if (info->icmpv6_type[0] > info->icmpv6_type[1] ||
125 info->icmpv6_code[0] > info->icmpv6_code[1])
126 return -EINVAL;
127 }
106 return 0; 128 return 0;
107} 129}
108 130
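The new union pkthdr lets the match read the first four bytes after the IPv6 extension headers either as a TCP/UDP port pair or as an ICMPv6 type/code pair, which is why a single 4-byte fetch covers both cases. A small standalone illustration of that overlay (plain userspace C; fixed-width types stand in for the kernel's __be16/u8):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

union pkthdr {
	struct {
		uint16_t src;
		uint16_t dst;
	} tcpudphdr;
	struct {
		uint8_t type;
		uint8_t code;
	} icmphdr;
};

int main(void)
{
	/* first 4 bytes of an ICMPv6 echo request: type 128, code 0, checksum */
	unsigned char l4[4] = { 128, 0, 0x12, 0x34 };
	union pkthdr hdr;

	memcpy(&hdr, l4, sizeof(hdr));
	printf("as icmpv6: type=%u code=%u\n", hdr.icmphdr.type, hdr.icmphdr.code);
	printf("as ports : src=%u dst=%u\n",
	       ntohs(hdr.tcpudphdr.src), ntohs(hdr.tcpudphdr.dst));
	return 0;
}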
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 87b53b3a921d..eae67bf0446c 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -39,8 +39,6 @@ static bool
39ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par) 39ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
40{ 40{
41 const struct ebt_vlan_info *info = par->matchinfo; 41 const struct ebt_vlan_info *info = par->matchinfo;
42 const struct vlan_hdr *fp;
43 struct vlan_hdr _frame;
44 42
45 unsigned short TCI; /* Whole TCI, given from parsed frame */ 43 unsigned short TCI; /* Whole TCI, given from parsed frame */
46 unsigned short id; /* VLAN ID, given from frame TCI */ 44 unsigned short id; /* VLAN ID, given from frame TCI */
@@ -48,9 +46,20 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
48 /* VLAN encapsulated Type/Length field, given from orig frame */ 46 /* VLAN encapsulated Type/Length field, given from orig frame */
49 __be16 encap; 47 __be16 encap;
50 48
51 fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame); 49 if (vlan_tx_tag_present(skb)) {
52 if (fp == NULL) 50 TCI = vlan_tx_tag_get(skb);
53 return false; 51 encap = skb->protocol;
52 } else {
53 const struct vlan_hdr *fp;
54 struct vlan_hdr _frame;
55
56 fp = skb_header_pointer(skb, 0, sizeof(_frame), &_frame);
57 if (fp == NULL)
58 return false;
59
60 TCI = ntohs(fp->h_vlan_TCI);
61 encap = fp->h_vlan_encapsulated_proto;
62 }
54 63
55 /* Tag Control Information (TCI) consists of the following elements: 64 /* Tag Control Information (TCI) consists of the following elements:
56 * - User_priority. The user_priority field is three bits in length, 65 * - User_priority. The user_priority field is three bits in length,
@@ -59,10 +68,8 @@ ebt_vlan_mt(const struct sk_buff *skb, struct xt_action_param *par)
59 * (CFI) is a single bit flag value. Currently ignored. 68 * (CFI) is a single bit flag value. Currently ignored.
60 * - VLAN Identifier (VID). The VID is encoded as 69 * - VLAN Identifier (VID). The VID is encoded as
61 * an unsigned binary number. */ 70 * an unsigned binary number. */
62 TCI = ntohs(fp->h_vlan_TCI);
63 id = TCI & VLAN_VID_MASK; 71 id = TCI & VLAN_VID_MASK;
64 prio = (TCI >> 13) & 0x7; 72 prio = (TCI >> 13) & 0x7;
65 encap = fp->h_vlan_encapsulated_proto;
66 73
67 /* Checking VLAN Identifier (VID) */ 74 /* Checking VLAN Identifier (VID) */
68 if (GET_BITMASK(EBT_VLAN_ID)) 75 if (GET_BITMASK(EBT_VLAN_ID))
@@ -111,10 +118,10 @@ static int ebt_vlan_mt_check(const struct xt_mtchk_param *par)
111 * 0 - The null VLAN ID. 118 * 0 - The null VLAN ID.
112 * 1 - The default Port VID (PVID) 119 * 1 - The default Port VID (PVID)
113 * 0x0FFF - Reserved for implementation use. 120 * 0x0FFF - Reserved for implementation use.
114 * if_vlan.h: VLAN_GROUP_ARRAY_LEN 4096. */ 121 * if_vlan.h: VLAN_N_VID 4096. */
115 if (GET_BITMASK(EBT_VLAN_ID)) { 122 if (GET_BITMASK(EBT_VLAN_ID)) {
116 if (!!info->id) { /* if id!=0 => check vid range */ 123 if (!!info->id) { /* if id!=0 => check vid range */
117 if (info->id > VLAN_GROUP_ARRAY_LEN) { 124 if (info->id > VLAN_N_VID) {
118 pr_debug("id %d is out of range (1-4096)\n", 125 pr_debug("id %d is out of range (1-4096)\n",
119 info->id); 126 info->id);
120 return -EINVAL; 127 return -EINVAL;
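The match now uses the tag carried in the skb (vlan_tx_tag_present()/vlan_tx_tag_get()) when hardware has already stripped the 802.1Q header, and only falls back to parsing the header itself otherwise; either way the same TCI decoding runs afterwards. A standalone illustration of that decoding (VLAN_VID_MASK is 0x0fff, as in if_vlan.h):

#include <stdio.h>

#define VLAN_VID_MASK	0x0fff

int main(void)
{
	unsigned short tci = 0xa07b;			/* priority 5, CFI 0, VID 123 */
	unsigned short id = tci & VLAN_VID_MASK;	/* VLAN Identifier */
	unsigned char prio = (tci >> 13) & 0x7;		/* user_priority bits */

	printf("vid=%u prio=%u\n", id, prio);
	return 0;
}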
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index ae3f106c3908..1bcaf36ad612 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,7 +87,8 @@ static int __init ebtable_broute_init(void)
87 if (ret < 0) 87 if (ret < 0)
88 return ret; 88 return ret;
89 /* see br_input.c */ 89 /* see br_input.c */
90 rcu_assign_pointer(br_should_route_hook, ebt_broute); 90 rcu_assign_pointer(br_should_route_hook,
91 (br_should_route_hook_t *)ebt_broute);
91 return 0; 92 return 0;
92} 93}
93 94
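The explicit cast to br_should_route_hook_t * goes with the hook now being a typed RCU-managed pointer rather than a plain exported function pointer. A sketch of the reader side this assignment pairs with (cf. the "see br_input.c" comment above); the declarations shown are an assumption about br_private.h, not part of this hunk:

/* assumed declaration, e.g. in br_private.h:
 *   typedef int br_should_route_hook_t(struct sk_buff *skb);
 *   extern br_should_route_hook_t __rcu *br_should_route_hook;
 */
br_should_route_hook_t *rhook;

/* rcu_read_lock() is already held on the bridge input path */
rhook = rcu_dereference(br_should_route_hook);
if (rhook && rhook(skb))
	return skb;	/* ebt_broute asked for the frame to be routed, not bridged */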
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bcc102e3be4d..2b5ca1a0054d 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -124,16 +124,24 @@ ebt_dev_check(const char *entry, const struct net_device *device)
124#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) 124#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125/* process standard matches */ 125/* process standard matches */
126static inline int 126static inline int
127ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h, 127ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128 const struct net_device *in, const struct net_device *out) 128 const struct net_device *in, const struct net_device *out)
129{ 129{
130 const struct ethhdr *h = eth_hdr(skb);
131 const struct net_bridge_port *p;
132 __be16 ethproto;
130 int verdict, i; 133 int verdict, i;
131 134
135 if (vlan_tx_tag_present(skb))
136 ethproto = htons(ETH_P_8021Q);
137 else
138 ethproto = h->h_proto;
139
132 if (e->bitmask & EBT_802_3) { 140 if (e->bitmask & EBT_802_3) {
133 if (FWINV2(ntohs(h->h_proto) >= 1536, EBT_IPROTO)) 141 if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
134 return 1; 142 return 1;
135 } else if (!(e->bitmask & EBT_NOPROTO) && 143 } else if (!(e->bitmask & EBT_NOPROTO) &&
136 FWINV2(e->ethproto != h->h_proto, EBT_IPROTO)) 144 FWINV2(e->ethproto != ethproto, EBT_IPROTO))
137 return 1; 145 return 1;
138 146
139 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN)) 147 if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
@@ -141,13 +149,11 @@ ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
141 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT)) 149 if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
142 return 1; 150 return 1;
143 /* rcu_read_lock()ed by nf_hook_slow */ 151 /* rcu_read_lock()ed by nf_hook_slow */
144 if (in && br_port_exists(in) && 152 if (in && (p = br_port_get_rcu(in)) != NULL &&
145 FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev), 153 FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
146 EBT_ILOGICALIN))
147 return 1; 154 return 1;
148 if (out && br_port_exists(out) && 155 if (out && (p = br_port_get_rcu(out)) != NULL &&
149 FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev), 156 FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
150 EBT_ILOGICALOUT))
151 return 1; 157 return 1;
152 158
153 if (e->bitmask & EBT_SOURCEMAC) { 159 if (e->bitmask & EBT_SOURCEMAC) {
@@ -213,7 +219,7 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
213 base = private->entries; 219 base = private->entries;
214 i = 0; 220 i = 0;
215 while (i < nentries) { 221 while (i < nentries) {
216 if (ebt_basic_match(point, eth_hdr(skb), in, out)) 222 if (ebt_basic_match(point, skb, in, out))
217 goto letscontinue; 223 goto letscontinue;
218 224
219 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0) 225 if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
@@ -1101,6 +1107,8 @@ static int do_replace(struct net *net, const void __user *user,
1101 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter)) 1107 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1102 return -ENOMEM; 1108 return -ENOMEM;
1103 1109
1110 tmp.name[sizeof(tmp.name) - 1] = 0;
1111
1104 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids; 1112 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1105 newinfo = vmalloc(sizeof(*newinfo) + countersize); 1113 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1106 if (!newinfo) 1114 if (!newinfo)
@@ -1141,7 +1149,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
1141 void *p; 1149 void *p;
1142 1150
1143 if (input_table == NULL || (repl = input_table->table) == NULL || 1151 if (input_table == NULL || (repl = input_table->table) == NULL ||
1144 repl->entries == 0 || repl->entries_size == 0 || 1152 repl->entries == NULL || repl->entries_size == 0 ||
1145 repl->counters != NULL || input_table->private != NULL) { 1153 repl->counters != NULL || input_table->private != NULL) {
1146 BUGPRINT("Bad table data for ebt_register_table!!!\n"); 1154 BUGPRINT("Bad table data for ebt_register_table!!!\n");
1147 return ERR_PTR(-EINVAL); 1155 return ERR_PTR(-EINVAL);
@@ -1758,6 +1766,7 @@ static int compat_table_info(const struct ebt_table_info *info,
1758 1766
1759 newinfo->entries_size = size; 1767 newinfo->entries_size = size;
1760 1768
1769 xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1761 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, 1770 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1762 entries, newinfo); 1771 entries, newinfo);
1763} 1772}
@@ -1873,15 +1882,14 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1873 struct xt_match *match; 1882 struct xt_match *match;
1874 struct xt_target *wt; 1883 struct xt_target *wt;
1875 void *dst = NULL; 1884 void *dst = NULL;
1876 int off, pad = 0, ret = 0; 1885 int off, pad = 0;
1877 unsigned int size_kern, entry_offset, match_size = mwt->match_size; 1886 unsigned int size_kern, match_size = mwt->match_size;
1878 1887
1879 strlcpy(name, mwt->u.name, sizeof(name)); 1888 strlcpy(name, mwt->u.name, sizeof(name));
1880 1889
1881 if (state->buf_kern_start) 1890 if (state->buf_kern_start)
1882 dst = state->buf_kern_start + state->buf_kern_offset; 1891 dst = state->buf_kern_start + state->buf_kern_offset;
1883 1892
1884 entry_offset = (unsigned char *) mwt - base;
1885 switch (compat_mwt) { 1893 switch (compat_mwt) {
1886 case EBT_COMPAT_MATCH: 1894 case EBT_COMPAT_MATCH:
1887 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, 1895 match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
@@ -1924,13 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1924 size_kern = wt->targetsize; 1932 size_kern = wt->targetsize;
1925 module_put(wt->me); 1933 module_put(wt->me);
1926 break; 1934 break;
1927 }
1928 1935
1929 if (!dst) { 1936 default:
1930 ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, 1937 return -EINVAL;
1931 off + ebt_compat_entry_padsize());
1932 if (ret < 0)
1933 return ret;
1934 } 1938 }
1935 1939
1936 state->buf_kern_offset += match_size + off; 1940 state->buf_kern_offset += match_size + off;
@@ -2007,50 +2011,6 @@ static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
2007 return growth; 2011 return growth;
2008} 2012}
2009 2013
2010#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...) \
2011({ \
2012 unsigned int __i; \
2013 int __ret = 0; \
2014 struct compat_ebt_entry_mwt *__watcher; \
2015 \
2016 for (__i = e->watchers_offset; \
2017 __i < (e)->target_offset; \
2018 __i += __watcher->watcher_size + \
2019 sizeof(struct compat_ebt_entry_mwt)) { \
2020 __watcher = (void *)(e) + __i; \
2021 __ret = fn(__watcher , ## args); \
2022 if (__ret != 0) \
2023 break; \
2024 } \
2025 if (__ret == 0) { \
2026 if (__i != (e)->target_offset) \
2027 __ret = -EINVAL; \
2028 } \
2029 __ret; \
2030})
2031
2032#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...) \
2033({ \
2034 unsigned int __i; \
2035 int __ret = 0; \
2036 struct compat_ebt_entry_mwt *__match; \
2037 \
2038 for (__i = sizeof(struct ebt_entry); \
2039 __i < (e)->watchers_offset; \
2040 __i += __match->match_size + \
2041 sizeof(struct compat_ebt_entry_mwt)) { \
2042 __match = (void *)(e) + __i; \
2043 __ret = fn(__match , ## args); \
2044 if (__ret != 0) \
2045 break; \
2046 } \
2047 if (__ret == 0) { \
2048 if (__i != (e)->watchers_offset) \
2049 __ret = -EINVAL; \
2050 } \
2051 __ret; \
2052})
2053
2054/* called for all ebt_entry structures. */ 2014/* called for all ebt_entry structures. */
2055static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, 2015static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2056 unsigned int *total, 2016 unsigned int *total,
@@ -2123,6 +2083,14 @@ static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2123 } 2083 }
2124 } 2084 }
2125 2085
2086 if (state->buf_kern_start == NULL) {
2087 unsigned int offset = buf_start - (char *) base;
2088
2089 ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2090 if (ret < 0)
2091 return ret;
2092 }
2093
2126 startoff = state->buf_user_offset - startoff; 2094 startoff = state->buf_user_offset - startoff;
2127 2095
2128 BUG_ON(*total < startoff); 2096 BUG_ON(*total < startoff);
@@ -2231,6 +2199,7 @@ static int compat_do_replace(struct net *net, void __user *user,
2231 2199
2232 xt_compat_lock(NFPROTO_BRIDGE); 2200 xt_compat_lock(NFPROTO_BRIDGE);
2233 2201
2202 xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2234 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state); 2203 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2235 if (ret < 0) 2204 if (ret < 0)
2236 goto out_unlock; 2205 goto out_unlock;
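Besides the compat offset rework, the do_replace() hunk above NUL-terminates tmp.name right after it is copied in from userspace: a fixed-size buffer filled by copy_from_user() carries no terminator guarantee, so it must be bounded before being used as a C string. A standalone illustration of the pattern (the struct and field sizes here are invented for the example, not ebtables' real layout):

#include <stdio.h>
#include <string.h>

struct replace_hdr {
	char name[32];			/* user-supplied, possibly unterminated */
	unsigned int nentries;
};

int main(void)
{
	struct replace_hdr tmp;

	/* simulate a hostile user filling the field without a terminator */
	memset(tmp.name, 'A', sizeof(tmp.name));
	tmp.nentries = 0;

	tmp.name[sizeof(tmp.name) - 1] = '\0';	/* the fix: force termination */
	printf("name length is now bounded: %zu\n", strlen(tmp.name));
	return 0;
}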