Diffstat (limited to 'net/bridge')
 net/bridge/Kconfig              |   1
 net/bridge/br.c                 |  12
 net/bridge/br_device.c          |   3
 net/bridge/br_fdb.c             |   6
 net/bridge/br_if.c              |   9
 net/bridge/br_input.c           |  43
 net/bridge/br_netfilter.c       | 225
 net/bridge/br_private.h         |   6
 net/bridge/br_stp_bpdu.c        | 196
 net/bridge/br_stp_timer.c       |  47
 net/bridge/br_sysfs_br.c        |  49
 net/bridge/netfilter/ebtables.c | 101
 12 files changed, 398 insertions(+), 300 deletions(-)
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index db23d59746cf..12265aff7099 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -4,6 +4,7 @@
 
 config BRIDGE
 	tristate "802.1d Ethernet Bridging"
+	select LLC
 	---help---
 	  If you say Y here, then your Linux box will be able to act as an
 	  Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br.c b/net/bridge/br.c
index 188cc1ac49eb..22d806cf40ca 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -19,13 +19,23 @@
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/llc.h>
+#include <net/llc.h>
 
 #include "br_private.h"
 
 int (*br_should_route_hook) (struct sk_buff **pskb) = NULL;
 
+static struct llc_sap *br_stp_sap;
+
 static int __init br_init(void)
 {
+	br_stp_sap = llc_sap_open(LLC_SAP_BSPAN, br_stp_rcv);
+	if (!br_stp_sap) {
+		printk(KERN_ERR "bridge: can't register sap for STP\n");
+		return -EBUSY;
+	}
+
 	br_fdb_init();
 
 #ifdef CONFIG_BRIDGE_NETFILTER
@@ -45,6 +55,8 @@ static int __init br_init(void)
 
 static void __exit br_deinit(void)
 {
+	llc_sap_close(br_stp_sap);
+
 #ifdef CONFIG_BRIDGE_NETFILTER
 	br_netfilter_fini();
 #endif
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 0b33a7b3a00c..0c88a2ac32c1 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -27,6 +27,7 @@ static struct net_device_stats *br_dev_get_stats(struct net_device *dev)
 	return &br->statistics;
 }
 
+/* net device transmit always called with no BH (preempt_disabled) */
 int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -39,7 +40,6 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb->mac.raw = skb->data;
 	skb_pull(skb, ETH_HLEN);
 
-	rcu_read_lock();
 	if (dest[0] & 1)
 		br_flood_deliver(br, skb, 0);
 	else if ((dst = __br_fdb_get(br, dest)) != NULL)
@@ -47,7 +47,6 @@ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	else
 		br_flood_deliver(br, skb, 0);
 
-	rcu_read_unlock();
 	return 0;
 }
 
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 1f08a59b51ea..3a73b8c94271 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -341,7 +341,6 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 	if (hold_time(br) == 0)
 		return;
 
-	rcu_read_lock();
 	fdb = fdb_find(head, addr);
 	if (likely(fdb)) {
 		/* attempt to update an entry for a local interface */
@@ -356,13 +355,12 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 			fdb->ageing_timer = jiffies;
 		}
 	} else {
-		spin_lock_bh(&br->hash_lock);
+		spin_lock(&br->hash_lock);
 		if (!fdb_find(head, addr))
 			fdb_create(head, source, addr, 0);
 		/* else we lose race and someone else inserts
 		 * it first, don't bother updating
 		 */
-		spin_unlock_bh(&br->hash_lock);
+		spin_unlock(&br->hash_lock);
 	}
-	rcu_read_unlock();
 }
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f36b35edd60c..59eef42d4a42 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -210,7 +210,8 @@ static struct net_device *new_bridge_dev(const char *name)
 
 	br->bridge_id.prio[0] = 0x80;
 	br->bridge_id.prio[1] = 0x00;
-	memset(br->bridge_id.addr, 0, ETH_ALEN);
+
+	memcpy(br->group_addr, br_group_address, ETH_ALEN);
 
 	br->feature_mask = dev->features;
 	br->stp_enabled = 0;
@@ -237,12 +238,11 @@ static int find_portno(struct net_bridge *br)
 	struct net_bridge_port *p;
 	unsigned long *inuse;
 
-	inuse = kmalloc(BITS_TO_LONGS(BR_MAX_PORTS)*sizeof(unsigned long),
+	inuse = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 			GFP_KERNEL);
 	if (!inuse)
 		return -ENOMEM;
 
-	memset(inuse, 0, BITS_TO_LONGS(BR_MAX_PORTS)*sizeof(unsigned long));
 	set_bit(0, inuse);	/* zero is reserved */
 	list_for_each_entry(p, &br->port_list, list) {
 		set_bit(p->port_no, inuse);
@@ -264,11 +264,10 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
 	if (index < 0)
 		return ERR_PTR(index);
 
-	p = kmalloc(sizeof(*p), GFP_KERNEL);
+	p = kzalloc(sizeof(*p), GFP_KERNEL);
 	if (p == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	memset(p, 0, sizeof(*p));
 	p->br = br;
 	dev_hold(dev);
 	p->dev = dev;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 4eef83755315..b7766562d72c 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -19,13 +19,8 @@
 #include <linux/netfilter_bridge.h>
 #include "br_private.h"
 
-const unsigned char bridge_ula[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
-
-static int br_pass_frame_up_finish(struct sk_buff *skb)
-{
-	netif_receive_skb(skb);
-	return 0;
-}
+/* Bridge group multicast address 802.1d (pg 51). */
+const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
 static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
 {
@@ -38,7 +33,7 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
 	skb->dev = br->dev;
 
 	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
-		br_pass_frame_up_finish);
+		netif_receive_skb);
 }
 
 /* note: already called with rcu_read_lock (preempt_disabled) */
@@ -100,6 +95,25 @@ drop:
 	goto out;
 }
 
+/* note: already called with rcu_read_lock (preempt_disabled) */
+static int br_handle_local_finish(struct sk_buff *skb)
+{
+	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+
+	if (p && p->state != BR_STATE_DISABLED)
+		br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
+
+	return 0;	/* process further */
+}
+
+/* Does address match the link local multicast address.
+ * 01:80:c2:00:00:0X
+ */
+static inline int is_link_local(const unsigned char *dest)
+{
+	return memcmp(dest, br_group_address, 5) == 0 && (dest[5] & 0xf0) == 0;
+}
+
 /*
  * Called via br_handle_frame_hook.
  * Return 0 if *pskb should be processed furthur
@@ -117,15 +131,10 @@ int br_handle_frame(struct net_bridge_port *p, struct sk_buff **pskb)
 	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
 		goto err;
 
-	if (p->br->stp_enabled &&
-	    !memcmp(dest, bridge_ula, 5) &&
-	    !(dest[5] & 0xF0)) {
-		if (!dest[5]) {
-			NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
-				NULL, br_stp_handle_bpdu);
-			return 1;
-		}
-		goto err;
+	if (unlikely(is_link_local(dest))) {
+		skb->pkt_type = PACKET_HOST;
+		return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
+			       NULL, br_handle_local_finish) != 0;
 	}
 
 	if (p->state == BR_STATE_FORWARDING || p->state == BR_STATE_LEARNING) {
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index e060aad8624d..f29450b788be 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -61,15 +61,25 @@ static int brnf_filter_vlan_tagged = 1;
 #define brnf_filter_vlan_tagged 1
 #endif
 
-#define IS_VLAN_IP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
-		    hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP) && \
-		    brnf_filter_vlan_tagged)
-#define IS_VLAN_IPV6 (skb->protocol == __constant_htons(ETH_P_8021Q) && \
-		      hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_IPV6) && \
-		      brnf_filter_vlan_tagged)
-#define IS_VLAN_ARP (skb->protocol == __constant_htons(ETH_P_8021Q) && \
-		     hdr->h_vlan_encapsulated_proto == __constant_htons(ETH_P_ARP) && \
-		     brnf_filter_vlan_tagged)
+static __be16 inline vlan_proto(const struct sk_buff *skb)
+{
+	return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+}
+
+#define IS_VLAN_IP(skb) \
+	(skb->protocol == htons(ETH_P_8021Q) && \
+	 vlan_proto(skb) == htons(ETH_P_IP) && \
+	 brnf_filter_vlan_tagged)
+
+#define IS_VLAN_IPV6(skb) \
+	(skb->protocol == htons(ETH_P_8021Q) && \
+	 vlan_proto(skb) == htons(ETH_P_IPV6) &&\
+	 brnf_filter_vlan_tagged)
+
+#define IS_VLAN_ARP(skb) \
+	(skb->protocol == htons(ETH_P_8021Q) && \
+	 vlan_proto(skb) == htons(ETH_P_ARP) && \
+	 brnf_filter_vlan_tagged)
 
 /* We need these fake structures to make netfilter happy --
  * lots of places assume that skb->dst != NULL, which isn't
@@ -103,6 +113,25 @@ static inline struct net_device *bridge_parent(const struct net_device *dev)
 	return port ? port->br->dev : NULL;
 }
 
+static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
+{
+	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
+	if (likely(skb->nf_bridge))
+		atomic_set(&(skb->nf_bridge->use), 1);
+
+	return skb->nf_bridge;
+}
+
+static inline void nf_bridge_save_header(struct sk_buff *skb)
+{
+	int header_size = 16;
+
+	if (skb->protocol == htons(ETH_P_8021Q))
+		header_size = 18;
+
+	memcpy(skb->nf_bridge->data, skb->data - header_size, header_size);
+}
+
 /* PF_BRIDGE/PRE_ROUTING *********************************************/
 /* Undo the changes made for ip6tables PREROUTING and continue the
  * bridge PRE_ROUTING hook. */
@@ -120,7 +149,7 @@ static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
 	dst_hold(skb->dst);
 
 	skb->dev = nf_bridge->physindev;
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_push(skb, VLAN_HLEN);
 		skb->nh.raw -= VLAN_HLEN;
 	}
@@ -136,7 +165,7 @@ static void __br_dnat_complain(void)
 
 	if (jiffies - last_complaint >= 5 * HZ) {
 		printk(KERN_WARNING "Performing cross-bridge DNAT requires IP "
-			"forwarding to be enabled\n");
+		       "forwarding to be enabled\n");
 		last_complaint = jiffies;
 	}
 }
@@ -196,7 +225,7 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
 	if (!skb->dev)
 		kfree_skb(skb);
 	else {
-		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+		if (skb->protocol == htons(ETH_P_8021Q)) {
 			skb_pull(skb, VLAN_HLEN);
 			skb->nh.raw += VLAN_HLEN;
 		}
@@ -218,12 +247,17 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
 	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
 
 	if (dnat_took_place(skb)) {
-		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
-				   dev)) {
+		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev)) {
 			struct rtable *rt;
-			struct flowi fl = { .nl_u =
-			{ .ip4_u = { .daddr = iph->daddr, .saddr = 0 ,
-				     .tos = RT_TOS(iph->tos)} }, .proto = 0};
+			struct flowi fl = {
+				.nl_u = {
+					.ip4_u = {
+						 .daddr = iph->daddr,
+						 .saddr = 0,
+						 .tos = RT_TOS(iph->tos) },
+				},
+				.proto = 0,
+			};
 
 			if (!ip_route_output_key(&rt, &fl)) {
 				/* - Bridged-and-DNAT'ed traffic doesn't
@@ -247,7 +281,7 @@ bridged_dnat:
 				nf_bridge->mask |= BRNF_BRIDGED_DNAT;
 				skb->dev = nf_bridge->physindev;
 				if (skb->protocol ==
-				    __constant_htons(ETH_P_8021Q)) {
+				    htons(ETH_P_8021Q)) {
 					skb_push(skb, VLAN_HLEN);
 					skb->nh.raw -= VLAN_HLEN;
 				}
@@ -257,8 +291,7 @@ bridged_dnat:
 					       1);
 				return 0;
 			}
-			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr,
-			       ETH_ALEN);
+			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
 			skb->pkt_type = PACKET_HOST;
 		}
 	} else {
@@ -267,7 +300,7 @@ bridged_dnat:
 	}
 
 	skb->dev = nf_bridge->physindev;
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_push(skb, VLAN_HLEN);
 		skb->nh.raw -= VLAN_HLEN;
 	}
@@ -297,10 +330,10 @@ static struct net_device *setup_pre_routing(struct sk_buff *skb)
 /* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
 static int check_hbh_len(struct sk_buff *skb)
 {
-	unsigned char *raw = (u8*)(skb->nh.ipv6h+1);
+	unsigned char *raw = (u8 *) (skb->nh.ipv6h + 1);
 	u32 pkt_len;
 	int off = raw - skb->nh.raw;
-	int len = (raw[1]+1)<<3;
+	int len = (raw[1] + 1) << 3;
 
 	if ((raw + len) - skb->data > skb_headlen(skb))
 		goto bad;
@@ -309,7 +342,7 @@ static int check_hbh_len(struct sk_buff *skb)
 	len -= 2;
 
 	while (len > 0) {
-		int optlen = skb->nh.raw[off+1]+2;
+		int optlen = skb->nh.raw[off + 1] + 2;
 
 		switch (skb->nh.raw[off]) {
 		case IPV6_TLV_PAD0:
@@ -320,16 +353,16 @@ static int check_hbh_len(struct sk_buff *skb)
 			break;
 
 		case IPV6_TLV_JUMBO:
-			if (skb->nh.raw[off+1] != 4 || (off&3) != 2)
+			if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2)
 				goto bad;
-			pkt_len = ntohl(*(u32*)(skb->nh.raw+off+2));
+			pkt_len = ntohl(*(u32 *) (skb->nh.raw + off + 2));
 			if (pkt_len <= IPV6_MAXPLEN ||
 			    skb->nh.ipv6h->payload_len)
 				goto bad;
 			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
 				goto bad;
 			if (pskb_trim_rcsum(skb,
-					    pkt_len+sizeof(struct ipv6hdr)))
+					    pkt_len + sizeof(struct ipv6hdr)))
 				goto bad;
 			break;
 		default:
@@ -350,12 +383,13 @@ bad:
 /* Replicate the checks that IPv6 does on packet reception and pass the packet
  * to ip6tables, which doesn't support NAT, so things are fairly simple. */
 static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
-	struct sk_buff *skb, const struct net_device *in,
-	const struct net_device *out, int (*okfn)(struct sk_buff *))
+					   struct sk_buff *skb,
+					   const struct net_device *in,
+					   const struct net_device *out,
+					   int (*okfn)(struct sk_buff *))
 {
 	struct ipv6hdr *hdr;
 	u32 pkt_len;
-	struct nf_bridge_info *nf_bridge;
 
 	if (skb->len < sizeof(struct ipv6hdr))
 		goto inhdr_error;
@@ -381,10 +415,10 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
 		}
 	}
 	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
 		goto inhdr_error;
 
 	nf_bridge_put(skb->nf_bridge);
-	if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
+	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
 	if (!setup_pre_routing(skb))
 		return NF_DROP;
@@ -412,10 +446,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 	struct iphdr *iph;
 	__u32 len;
 	struct sk_buff *skb = *pskb;
-	struct nf_bridge_info *nf_bridge;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(*pskb);
 
-	if (skb->protocol == __constant_htons(ETH_P_IPV6) || IS_VLAN_IPV6) {
+	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb)) {
 #ifdef CONFIG_SYSCTL
 		if (!brnf_call_ip6tables)
 			return NF_ACCEPT;
@@ -423,10 +455,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 		if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
 			goto out;
 
-		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
-			u8 *vhdr = skb->data;
-			skb_pull(skb, VLAN_HLEN);
-			skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);
+		if (skb->protocol == htons(ETH_P_8021Q)) {
+			skb_pull_rcsum(skb, VLAN_HLEN);
 			skb->nh.raw += VLAN_HLEN;
 		}
 		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
@@ -436,16 +466,14 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 		return NF_ACCEPT;
 #endif
 
-	if (skb->protocol != __constant_htons(ETH_P_IP) && !IS_VLAN_IP)
+	if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb))
 		return NF_ACCEPT;
 
 	if ((skb = skb_share_check(*pskb, GFP_ATOMIC)) == NULL)
 		goto out;
 
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
-		u8 *vhdr = skb->data;
-		skb_pull(skb, VLAN_HLEN);
-		skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);
+	if (skb->protocol == htons(ETH_P_8021Q)) {
+		skb_pull_rcsum(skb, VLAN_HLEN);
 		skb->nh.raw += VLAN_HLEN;
 	}
 
@@ -456,15 +484,15 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 	if (iph->ihl < 5 || iph->version != 4)
 		goto inhdr_error;
 
-	if (!pskb_may_pull(skb, 4*iph->ihl))
+	if (!pskb_may_pull(skb, 4 * iph->ihl))
 		goto inhdr_error;
 
 	iph = skb->nh.iph;
-	if (ip_fast_csum((__u8 *)iph, iph->ihl) != 0)
+	if (ip_fast_csum((__u8 *) iph, iph->ihl) != 0)
 		goto inhdr_error;
 
 	len = ntohs(iph->tot_len);
-	if (skb->len < len || len < 4*iph->ihl)
+	if (skb->len < len || len < 4 * iph->ihl)
 		goto inhdr_error;
 
 	if (skb->len > len) {
@@ -473,8 +501,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 		skb->ip_summed = CHECKSUM_NONE;
 	}
 
 	nf_bridge_put(skb->nf_bridge);
-	if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
+	if (!nf_bridge_alloc(skb))
 		return NF_DROP;
 	if (!setup_pre_routing(skb))
 		return NF_DROP;
@@ -486,7 +514,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
 	return NF_STOLEN;
 
 inhdr_error:
-//            IP_INC_STATS_BH(IpInHdrErrors);
+//      IP_INC_STATS_BH(IpInHdrErrors);
 out:
 	return NF_DROP;
 }
@@ -500,8 +528,9 @@ out:
  * register an IPv4 PRE_ROUTING 'sabotage' hook that will
  * prevent this from happening. */
 static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				   const struct net_device *in,
+				   const struct net_device *out,
+				   int (*okfn)(struct sk_buff *))
 {
 	struct sk_buff *skb = *pskb;
 
@@ -513,15 +542,13 @@ static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff **pskb,
 	return NF_ACCEPT;
 }
 
-
 /* PF_BRIDGE/FORWARD *************************************************/
 static int br_nf_forward_finish(struct sk_buff *skb)
 {
 	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
 	struct net_device *in;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
 
-	if (skb->protocol != __constant_htons(ETH_P_ARP) && !IS_VLAN_ARP) {
+	if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
 		in = nf_bridge->physindev;
 		if (nf_bridge->mask & BRNF_PKT_TYPE) {
 			skb->pkt_type = PACKET_OTHERHOST;
@@ -530,12 +557,12 @@ static int br_nf_forward_finish(struct sk_buff *skb)
 	} else {
 		in = *((struct net_device **)(skb->cb));
 	}
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_push(skb, VLAN_HLEN);
 		skb->nh.raw -= VLAN_HLEN;
 	}
 	NF_HOOK_THRESH(PF_BRIDGE, NF_BR_FORWARD, skb, in,
 		       skb->dev, br_forward_finish, 1);
 	return 0;
 }
 
@@ -545,12 +572,12 @@ static int br_nf_forward_finish(struct sk_buff *skb)
  * because of the physdev module. For ARP, indev and outdev are the
  * bridge ports. */
 static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				     const struct net_device *in,
+				     const struct net_device *out,
+				     int (*okfn)(struct sk_buff *))
 {
 	struct sk_buff *skb = *pskb;
 	struct nf_bridge_info *nf_bridge;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
 	struct net_device *parent;
 	int pf;
 
@@ -561,12 +588,12 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
 	if (!parent)
 		return NF_DROP;
 
-	if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
+	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb))
 		pf = PF_INET;
 	else
 		pf = PF_INET6;
 
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_pull(*pskb, VLAN_HLEN);
 		(*pskb)->nh.raw += VLAN_HLEN;
 	}
@@ -588,11 +615,11 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
 }
 
 static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				      const struct net_device *in,
+				      const struct net_device *out,
+				      int (*okfn)(struct sk_buff *))
 {
 	struct sk_buff *skb = *pskb;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
 	struct net_device **d = (struct net_device **)(skb->cb);
 
 #ifdef CONFIG_SYSCTL
@@ -600,15 +627,15 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
 		return NF_ACCEPT;
 #endif
 
-	if (skb->protocol != __constant_htons(ETH_P_ARP)) {
-		if (!IS_VLAN_ARP)
+	if (skb->protocol != htons(ETH_P_ARP)) {
+		if (!IS_VLAN_ARP(skb))
 			return NF_ACCEPT;
 		skb_pull(*pskb, VLAN_HLEN);
 		(*pskb)->nh.raw += VLAN_HLEN;
 	}
 
 	if (skb->nh.arph->ar_pln != 4) {
-		if (IS_VLAN_ARP) {
+		if (IS_VLAN_ARP(skb)) {
 			skb_push(*pskb, VLAN_HLEN);
 			(*pskb)->nh.raw -= VLAN_HLEN;
 		}
@@ -621,17 +648,16 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff **pskb,
 	return NF_STOLEN;
 }
 
-
 /* PF_BRIDGE/LOCAL_OUT ***********************************************/
 static int br_nf_local_out_finish(struct sk_buff *skb)
 {
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_push(skb, VLAN_HLEN);
 		skb->nh.raw -= VLAN_HLEN;
 	}
 
 	NF_HOOK_THRESH(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		       br_forward_finish, NF_BR_PRI_FIRST + 1);
 
 	return 0;
 }
@@ -657,19 +683,19 @@ static int br_nf_local_out_finish(struct sk_buff *skb)
  * even routed packets that didn't arrive on a bridge interface have their
  * nf_bridge->physindev set. */
 static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    int (*okfn)(struct sk_buff *))
 {
 	struct net_device *realindev, *realoutdev;
 	struct sk_buff *skb = *pskb;
 	struct nf_bridge_info *nf_bridge;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
 	int pf;
 
 	if (!skb->nf_bridge)
 		return NF_ACCEPT;
 
-	if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
+	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb))
 		pf = PF_INET;
 	else
 		pf = PF_INET6;
@@ -695,7 +721,7 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
 			skb->pkt_type = PACKET_OTHERHOST;
 			nf_bridge->mask ^= BRNF_PKT_TYPE;
 		}
-		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+		if (skb->protocol == htons(ETH_P_8021Q)) {
 			skb_push(skb, VLAN_HLEN);
 			skb->nh.raw -= VLAN_HLEN;
 		}
@@ -713,14 +739,14 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
 		if (nf_bridge->netoutdev)
 			realoutdev = nf_bridge->netoutdev;
 #endif
-		if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+		if (skb->protocol == htons(ETH_P_8021Q)) {
 			skb_pull(skb, VLAN_HLEN);
 			(*pskb)->nh.raw += VLAN_HLEN;
 		}
 		/* IP forwarded traffic has a physindev, locally
 		 * generated traffic hasn't. */
 		if (realindev != NULL) {
-			if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) ) {
+			if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT)) {
 				struct net_device *parent = bridge_parent(realindev);
 				if (parent)
 					realindev = parent;
@@ -742,12 +768,12 @@ out:
 
 /* PF_BRIDGE/POST_ROUTING ********************************************/
 static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				       const struct net_device *in,
+				       const struct net_device *out,
+				       int (*okfn)(struct sk_buff *))
 {
 	struct sk_buff *skb = *pskb;
 	struct nf_bridge_info *nf_bridge = (*pskb)->nf_bridge;
-	struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
 	struct net_device *realoutdev = bridge_parent(skb->dev);
 	int pf;
 
@@ -756,7 +782,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 	 * keep the check just to be sure... */
 	if (skb->mac.raw < skb->head || skb->mac.raw + ETH_HLEN > skb->data) {
 		printk(KERN_CRIT "br_netfilter: Argh!! br_nf_post_routing: "
-			"bad mac.raw pointer.");
+		       "bad mac.raw pointer.");
 		goto print_error;
 	}
 #endif
@@ -767,7 +793,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 	if (!realoutdev)
 		return NF_DROP;
 
-	if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
+	if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb))
 		pf = PF_INET;
 	else
 		pf = PF_INET6;
@@ -786,7 +812,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 		nf_bridge->mask |= BRNF_PKT_TYPE;
 	}
 
-	if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
+	if (skb->protocol == htons(ETH_P_8021Q)) {
 		skb_pull(skb, VLAN_HLEN);
 		skb->nh.raw += VLAN_HLEN;
 	}
@@ -798,7 +824,7 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
 		realoutdev = nf_bridge->netoutdev;
 #endif
 	NF_HOOK(pf, NF_IP_POST_ROUTING, skb, NULL, realoutdev,
-		br_dev_queue_push_xmit);
+	        br_dev_queue_push_xmit);
 
 	return NF_STOLEN;
 
@@ -810,18 +836,18 @@ print_error:
 		printk("[%s]", realoutdev->name);
 	}
 	printk(" head:%p, raw:%p, data:%p\n", skb->head, skb->mac.raw,
-		skb->data);
+	       skb->data);
 	return NF_ACCEPT;
 #endif
 }
 
-
 /* IP/SABOTAGE *****************************************************/
 /* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
  * for the second time. */
 static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				   const struct net_device *in,
+				   const struct net_device *out,
+				   int (*okfn)(struct sk_buff *))
 {
 	if ((*pskb)->nf_bridge &&
 	    !((*pskb)->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
@@ -835,18 +861,18 @@ static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff **pskb,
  * and PF_INET(6)/POST_ROUTING until we have done the forwarding
  * decision in the bridge code and have determined nf_bridge->physoutdev. */
 static unsigned int ip_sabotage_out(unsigned int hook, struct sk_buff **pskb,
-   const struct net_device *in, const struct net_device *out,
-   int (*okfn)(struct sk_buff *))
+				    const struct net_device *in,
+				    const struct net_device *out,
+				    int (*okfn)(struct sk_buff *))
 {
 	struct sk_buff *skb = *pskb;
 
 	if ((out->hard_start_xmit == br_dev_xmit &&
 	     okfn != br_nf_forward_finish &&
-	     okfn != br_nf_local_out_finish &&
-	     okfn != br_dev_queue_push_xmit)
+	     okfn != br_nf_local_out_finish && okfn != br_dev_queue_push_xmit)
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
 	    || ((out->priv_flags & IFF_802_1Q_VLAN) &&
 		VLAN_DEV_INFO(out)->real_dev->hard_start_xmit == br_dev_xmit)
 #endif
 	    ) {
 		struct nf_bridge_info *nf_bridge;
@@ -971,8 +997,8 @@ static struct nf_hook_ops br_nf_ops[] = {
 
 #ifdef CONFIG_SYSCTL
 static
-int brnf_sysctl_call_tables(ctl_table *ctl, int write, struct file * filp,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+int brnf_sysctl_call_tables(ctl_table * ctl, int write, struct file *filp,
+			    void __user * buffer, size_t * lenp, loff_t * ppos)
 {
 	int ret;
 
@@ -1059,7 +1085,8 @@ int br_netfilter_init(void)
 #ifdef CONFIG_SYSCTL
 	brnf_sysctl_header = register_sysctl_table(brnf_net_table, 0);
 	if (brnf_sysctl_header == NULL) {
-		printk(KERN_WARNING "br_netfilter: can't register to sysctl.\n");
+		printk(KERN_WARNING
+		       "br_netfilter: can't register to sysctl.\n");
 		for (i = 0; i < ARRAY_SIZE(br_nf_ops); i++)
 			nf_unregister_hook(&br_nf_ops[i]);
 		return -EFAULT;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8f10e09f251b..86ecea7ed372 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -109,6 +109,7 @@ struct net_bridge
 	unsigned long			bridge_hello_time;
 	unsigned long			bridge_forward_delay;
 
+	u8				group_addr[ETH_ALEN];
 	u16				root_port;
 	unsigned char			stp_enabled;
 	unsigned char			topology_change;
@@ -122,7 +123,7 @@ struct net_bridge
 };
 
 extern struct notifier_block br_device_notifier;
-extern const unsigned char bridge_ula[6];
+extern const u8 br_group_address[ETH_ALEN];
 
 /* called under bridge lock */
 static inline int br_is_root_bridge(const struct net_bridge *br)
@@ -217,7 +218,8 @@ extern void br_stp_set_path_cost(struct net_bridge_port *p,
 extern ssize_t br_show_bridge_id(char *buf, const struct bridge_id *id);
 
 /* br_stp_bpdu.c */
-extern int br_stp_handle_bpdu(struct sk_buff *skb);
+extern int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
+		      struct packet_type *pt, struct net_device *orig_dev);
 
 /* br_stp_timer.c */
 extern void br_stp_timer_init(struct net_bridge *br);
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 296f6a487c52..8934a54792be 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -15,158 +15,162 @@
 
 #include <linux/kernel.h>
 #include <linux/netfilter_bridge.h>
+#include <linux/etherdevice.h>
+#include <linux/llc.h>
+#include <net/llc.h>
+#include <net/llc_pdu.h>
 
 #include "br_private.h"
 #include "br_private_stp.h"
 
-#define JIFFIES_TO_TICKS(j) (((j) << 8) / HZ)
-#define TICKS_TO_JIFFIES(j) (((j) * HZ) >> 8)
+#define STP_HZ		256
 
-static void br_send_bpdu(struct net_bridge_port *p, unsigned char *data, int length)
+#define LLC_RESERVE sizeof(struct llc_pdu_un)
+
+static void br_send_bpdu(struct net_bridge_port *p,
+			 const unsigned char *data, int length)
 {
-	struct net_device *dev;
 	struct sk_buff *skb;
-	int size;
 
 	if (!p->br->stp_enabled)
 		return;
 
-	size = length + 2*ETH_ALEN + 2;
-	if (size < 60)
-		size = 60;
-
-	dev = p->dev;
-
-	if ((skb = dev_alloc_skb(size)) == NULL) {
-		printk(KERN_INFO "br: memory squeeze!\n");
+	skb = dev_alloc_skb(length+LLC_RESERVE);
+	if (!skb)
 		return;
-	}
 
-	skb->dev = dev;
+	skb->dev = p->dev;
 	skb->protocol = htons(ETH_P_802_2);
-	skb->mac.raw = skb_put(skb, size);
-	memcpy(skb->mac.raw, bridge_ula, ETH_ALEN);
-	memcpy(skb->mac.raw+ETH_ALEN, dev->dev_addr, ETH_ALEN);
-	skb->mac.raw[2*ETH_ALEN] = 0;
-	skb->mac.raw[2*ETH_ALEN+1] = length;
-	skb->nh.raw = skb->mac.raw + 2*ETH_ALEN + 2;
-	memcpy(skb->nh.raw, data, length);
-	memset(skb->nh.raw + length, 0xa5, size - length - 2*ETH_ALEN - 2);
+
+	skb_reserve(skb, LLC_RESERVE);
+	memcpy(__skb_put(skb, length), data, length);
+
+	llc_pdu_header_init(skb, LLC_PDU_TYPE_U, LLC_SAP_BSPAN,
+			    LLC_SAP_BSPAN, LLC_PDU_CMD);
+	llc_pdu_init_as_ui_cmd(skb);
+
+	llc_mac_hdr_init(skb, p->dev->dev_addr, p->br->group_addr);
 
 	NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		dev_queue_xmit);
 }
 
-static __inline__ void br_set_ticks(unsigned char *dest, int jiff)
+static inline void br_set_ticks(unsigned char *dest, int j)
 {
-	__u16 ticks;
+	unsigned long ticks = (STP_HZ * j)/ HZ;
 
-	ticks = JIFFIES_TO_TICKS(jiff);
-	dest[0] = (ticks >> 8) & 0xFF;
-	dest[1] = ticks & 0xFF;
+	*((__be16 *) dest) = htons(ticks);
 }
 
-static __inline__ int br_get_ticks(unsigned char *dest)
+static inline int br_get_ticks(const unsigned char *src)
 {
-	return TICKS_TO_JIFFIES((dest[0] << 8) | dest[1]);
+	unsigned long ticks = ntohs(*(__be16 *)src);
+
+	return (ticks * HZ + STP_HZ - 1) / STP_HZ;
 }
 
 /* called under bridge lock */
 void br_send_config_bpdu(struct net_bridge_port *p, struct br_config_bpdu *bpdu)
 {
-	unsigned char buf[38];
+	unsigned char buf[35];
 
-	buf[0] = 0x42;
-	buf[1] = 0x42;
-	buf[2] = 0x03;
-	buf[3] = 0;
-	buf[4] = 0;
-	buf[5] = 0;
-	buf[6] = BPDU_TYPE_CONFIG;
-	buf[7] = (bpdu->topology_change ? 0x01 : 0) |
+	buf[0] = 0;
+	buf[1] = 0;
+	buf[2] = 0;
+	buf[3] = BPDU_TYPE_CONFIG;
+	buf[4] = (bpdu->topology_change ? 0x01 : 0) |
 		(bpdu->topology_change_ack ? 0x80 : 0);
-	buf[8] = bpdu->root.prio[0];
-	buf[9] = bpdu->root.prio[1];
-	buf[10] = bpdu->root.addr[0];
-	buf[11] = bpdu->root.addr[1];
-	buf[12] = bpdu->root.addr[2];
-	buf[13] = bpdu->root.addr[3];
-	buf[14] = bpdu->root.addr[4];
-	buf[15] = bpdu->root.addr[5];
-	buf[16] = (bpdu->root_path_cost >> 24) & 0xFF;
-	buf[17] = (bpdu->root_path_cost >> 16) & 0xFF;
-	buf[18] = (bpdu->root_path_cost >> 8) & 0xFF;
-	buf[19] = bpdu->root_path_cost & 0xFF;
-	buf[20] = bpdu->bridge_id.prio[0];
-	buf[21] = bpdu->bridge_id.prio[1];
-	buf[22] = bpdu->bridge_id.addr[0];
-	buf[23] = bpdu->bridge_id.addr[1];
-	buf[24] = bpdu->bridge_id.addr[2];
-	buf[25] = bpdu->bridge_id.addr[3];
-	buf[26] = bpdu->bridge_id.addr[4];
-	buf[27] = bpdu->bridge_id.addr[5];
-	buf[28] = (bpdu->port_id >> 8) & 0xFF;
-	buf[29] = bpdu->port_id & 0xFF;
+	buf[5] = bpdu->root.prio[0];
+	buf[6] = bpdu->root.prio[1];
+	buf[7] = bpdu->root.addr[0];
+	buf[8] = bpdu->root.addr[1];
+	buf[9] = bpdu->root.addr[2];
+	buf[10] = bpdu->root.addr[3];
+	buf[11] = bpdu->root.addr[4];
+	buf[12] = bpdu->root.addr[5];
+	buf[13] = (bpdu->root_path_cost >> 24) & 0xFF;
+	buf[14] = (bpdu->root_path_cost >> 16) & 0xFF;
+	buf[15] = (bpdu->root_path_cost >> 8) & 0xFF;
+	buf[16] = bpdu->root_path_cost & 0xFF;
+	buf[17] = bpdu->bridge_id.prio[0];
+	buf[18] = bpdu->bridge_id.prio[1];
+	buf[19] = bpdu->bridge_id.addr[0];
+	buf[20] = bpdu->bridge_id.addr[1];
+	buf[21] = bpdu->bridge_id.addr[2];
+	buf[22] = bpdu->bridge_id.addr[3];
+	buf[23] = bpdu->bridge_id.addr[4];
+	buf[24] = bpdu->bridge_id.addr[5];
+	buf[25] = (bpdu->port_id >> 8) & 0xFF;
+	buf[26] = bpdu->port_id & 0xFF;
 
-	br_set_ticks(buf+30, bpdu->message_age);
-	br_set_ticks(buf+32, bpdu->max_age);
-	br_set_ticks(buf+34, bpdu->hello_time);
-	br_set_ticks(buf+36, bpdu->forward_delay);
+	br_set_ticks(buf+27, bpdu->message_age);
+	br_set_ticks(buf+29, bpdu->max_age);
+	br_set_ticks(buf+31, bpdu->hello_time);
+	br_set_ticks(buf+33, bpdu->forward_delay);
 
-	br_send_bpdu(p, buf, 38);
+	br_send_bpdu(p, buf, 35);
 }
 
 /* called under bridge lock */
 void br_send_tcn_bpdu(struct net_bridge_port *p)
 {
-	unsigned char buf[7];
+	unsigned char buf[4];
 
-	buf[0] = 0x42;
-	buf[1] = 0x42;
-	buf[2] = 0x03;
-	buf[3] = 0;
-	buf[4] = 0;
-	buf[5] = 0;
-	buf[6] = BPDU_TYPE_TCN;
+	buf[0] = 0;
+	buf[1] = 0;
+	buf[2] = 0;
+	buf[3] = BPDU_TYPE_TCN;
 	br_send_bpdu(p, buf, 7);
 }
 
-static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
-
-/* NO locks, but rcu_read_lock (preempt_disabled) */
-int br_stp_handle_bpdu(struct sk_buff *skb)
+/*
+ * Called from llc.
+ *
+ * NO locks, but rcu_read_lock (preempt_disabled)
+ */
+int br_stp_rcv(struct sk_buff *skb, struct net_device *dev,
+	       struct packet_type *pt, struct net_device *orig_dev)
 {
-	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
+	const struct llc_pdu_un *pdu = llc_pdu_un_hdr(skb);
+	const unsigned char *dest = eth_hdr(skb)->h_dest;
+	struct net_bridge_port *p = rcu_dereference(dev->br_port);
 	struct net_bridge *br;
-	unsigned char *buf;
+	const unsigned char *buf;
 
 	if (!p)
 		goto err;
 
-	br = p->br;
-	spin_lock(&br->lock);
+	if (pdu->ssap != LLC_SAP_BSPAN
+	    || pdu->dsap != LLC_SAP_BSPAN
+	    || pdu->ctrl_1 != LLC_PDU_TYPE_U)
+		goto err;
 
-	if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP))
-		goto out;
+	if (!pskb_may_pull(skb, 4))
+		goto err;
+
+	/* compare of protocol id and version */
+	buf = skb->data;
+	if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
+		goto err;
 
-	/* insert into forwarding database after filtering to avoid spoofing */
-	br_fdb_update(br, p, eth_hdr(skb)->h_source);
+	br = p->br;
+	spin_lock(&br->lock);
 
-	if (!br->stp_enabled)
+	if (p->state == BR_STATE_DISABLED
+	    || !br->stp_enabled
+	    || !(br->dev->flags & IFF_UP))
 		goto out;
 
-	/* need at least the 802 and STP headers */
-	if (!pskb_may_pull(skb, sizeof(header)+1) ||
-	    memcmp(skb->data, header, sizeof(header)))
+	if (compare_ether_addr(dest, br->group_addr) != 0)
 		goto out;
 
-	buf = skb_pull(skb, sizeof(header));
+	buf = skb_pull(skb, 3);
 
 	if (buf[0] == BPDU_TYPE_CONFIG) {
 		struct br_config_bpdu bpdu;
 
 		if (!pskb_may_pull(skb, 32))
 			goto out;
 
 		buf = skb->data;
 		bpdu.topology_change = (buf[1] & 0x01) ? 1 : 0;
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 9bef55f56425..d0fcde82c6fc 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -39,13 +39,13 @@ static void br_hello_timer_expired(unsigned long arg)
 	struct net_bridge *br = (struct net_bridge *)arg;
 
 	pr_debug("%s: hello timer expired\n", br->dev->name);
-	spin_lock_bh(&br->lock);
+	spin_lock(&br->lock);
 	if (br->dev->flags & IFF_UP) {
 		br_config_bpdu_generation(br);
 
 		mod_timer(&br->hello_timer, jiffies + br->hello_time);
 	}
-	spin_unlock_bh(&br->lock);
+	spin_unlock(&br->lock);
 }
 
 static void br_message_age_timer_expired(unsigned long arg)
@@ -71,7 +71,7 @@ static void br_message_age_timer_expired(unsigned long arg)
 	 * running when we are the root bridge. So..  this was_root
 	 * check is redundant. I'm leaving it in for now, though.
 	 */
-	spin_lock_bh(&br->lock);
+	spin_lock(&br->lock);
 	if (p->state == BR_STATE_DISABLED)
 		goto unlock;
 	was_root = br_is_root_bridge(br);
@@ -82,7 +82,7 @@ static void br_message_age_timer_expired(unsigned long arg)
 	if (br_is_root_bridge(br) && !was_root)
 		br_become_root_bridge(br);
  unlock:
-	spin_unlock_bh(&br->lock);
+	spin_unlock(&br->lock);
 }
 
 static void br_forward_delay_timer_expired(unsigned long arg)
@@ -92,7 +92,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
 
 	pr_debug("%s: %d(%s) forward delay timer\n",
 		 br->dev->name, p->port_no, p->dev->name);
-	spin_lock_bh(&br->lock);
+	spin_lock(&br->lock);
 	if (p->state == BR_STATE_LISTENING) {
 		p->state = BR_STATE_LEARNING;
 		mod_timer(&p->forward_delay_timer,
@@ -103,7 +103,7 @@ static void br_forward_delay_timer_expired(unsigned long arg)
 		br_topology_change_detection(br);
 	}
 	br_log_state(p);
-	spin_unlock_bh(&br->lock);
+	spin_unlock(&br->lock);
 }
 
 static void br_tcn_timer_expired(unsigned long arg)
@@ -111,13 +111,13 @@ static void br_tcn_timer_expired(unsigned long arg)
 	struct net_bridge *br = (struct net_bridge *) arg;
 
 	pr_debug("%s: tcn timer expired\n", br->dev->name);
-	spin_lock_bh(&br->lock);
+	spin_lock(&br->lock);
 	if (br->dev->flags & IFF_UP) {
 		br_transmit_tcn(br);
 
 		mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time);
 	}
-	spin_unlock_bh(&br->lock);
+	spin_unlock(&br->lock);
 }
 
 static void br_topology_change_timer_expired(unsigned long arg)
@@ -125,10 +125,10 @@ static void br_topology_change_timer_expired(unsigned long arg)
 	struct net_bridge *br = (struct net_bridge *) arg;
 
 	pr_debug("%s: topo change timer expired\n", br->dev->name);
-	spin_lock_bh(&br->lock);
+	spin_lock(&br->lock);
 	br->topology_change_detected = 0;
 	br->topology_change = 0;
-	spin_unlock_bh(&br->lock);
+	spin_unlock(&br->lock);
 }
 
 static void br_hold_timer_expired(unsigned long arg)
@@ -138,45 +138,36 @@ static void br_hold_timer_expired(unsigned long arg)
 	pr_debug("%s: %d(%s) hold timer expired\n",
 		 p->br->dev->name, p->port_no, p->dev->name);
 
-	spin_lock_bh(&p->br->lock);
+	spin_lock(&p->br->lock);
 	if (p->config_pending)
 		br_transmit_config(p);
-	spin_unlock_bh(&p->br->lock);
-}
-
-static inline void br_timer_init(struct timer_list *timer,
-				 void (*_function)(unsigned long),
-				 unsigned long _data)
-{
-	init_timer(timer);
-	timer->function = _function;
-	timer->data = _data;
+	spin_unlock(&p->br->lock);
 }
 
 void br_stp_timer_init(struct net_bridge *br)
 {
-	br_timer_init(&br->hello_timer, br_hello_timer_expired,
+	setup_timer(&br->hello_timer, br_hello_timer_expired,
 		      (unsigned long) br);
 
-	br_timer_init(&br->tcn_timer, br_tcn_timer_expired,
+	setup_timer(&br->tcn_timer, br_tcn_timer_expired,
 		      (unsigned long) br);
 
-	br_timer_init(&br->topology_change_timer,
+	setup_timer(&br->topology_change_timer,
 		      br_topology_change_timer_expired,
 		      (unsigned long) br);
 
-	br_timer_init(&br->gc_timer, br_fdb_cleanup, (unsigned long) br);
+	setup_timer(&br->gc_timer, br_fdb_cleanup, (unsigned long) br);
 }
 
 void br_stp_port_timer_init(struct net_bridge_port *p)
 {
-	br_timer_init(&p->message_age_timer, br_message_age_timer_expired,
+	setup_timer(&p->message_age_timer, br_message_age_timer_expired,
 		      (unsigned long) p);
 
-	br_timer_init(&p->forward_delay_timer, br_forward_delay_timer_expired,
+	setup_timer(&p->forward_delay_timer, br_forward_delay_timer_expired,
 		      (unsigned long) p);
 
-	br_timer_init(&p->hold_timer, br_hold_timer_expired,
+	setup_timer(&p->hold_timer, br_hold_timer_expired,
 		      (unsigned long) p);
 }
 
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index 6f577f16c4c0..96bcb2ff59ab 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -242,6 +242,54 @@ static ssize_t show_gc_timer(struct class_device *cd, char *buf)
 }
 static CLASS_DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
 
+static ssize_t show_group_addr(struct class_device *cd, char *buf)
+{
+	struct net_bridge *br = to_bridge(cd);
+	return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
+		       br->group_addr[0], br->group_addr[1],
+		       br->group_addr[2], br->group_addr[3],
+		       br->group_addr[4], br->group_addr[5]);
+}
+
+static ssize_t store_group_addr(struct class_device *cd, const char *buf,
+				size_t len)
+{
+	struct net_bridge *br = to_bridge(cd);
+	unsigned new_addr[6];
+	int i;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (sscanf(buf, "%x:%x:%x:%x:%x:%x",
+		   &new_addr[0], &new_addr[1], &new_addr[2],
+		   &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
+		return -EINVAL;
+
+	/* Must be 01:80:c2:00:00:0X */
+	for (i = 0; i < 5; i++)
+		if (new_addr[i] != br_group_address[i])
+			return -EINVAL;
+
+	if (new_addr[5] & ~0xf)
+		return -EINVAL;
+
+	if (new_addr[5] == 1 	/* 802.3x Pause address */
+	    || new_addr[5] == 2 /* 802.3ad Slow protocols */
+	    || new_addr[5] == 3) /* 802.1X PAE address */
+		return -EINVAL;
+
+	spin_lock_bh(&br->lock);
+	for (i = 0; i < 6; i++)
+		br->group_addr[i] = new_addr[i];
+	spin_unlock_bh(&br->lock);
+	return len;
+}
+
+static CLASS_DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
+			 show_group_addr, store_group_addr);
+
+
 static struct attribute *bridge_attrs[] = {
 	&class_device_attr_forward_delay.attr,
 	&class_device_attr_hello_time.attr,
@@ -259,6 +307,7 @@ static struct attribute *bridge_attrs[] = {
259 &class_device_attr_tcn_timer.attr, 307 &class_device_attr_tcn_timer.attr,
260 &class_device_attr_topology_change_timer.attr, 308 &class_device_attr_topology_change_timer.attr,
261 &class_device_attr_gc_timer.attr, 309 &class_device_attr_gc_timer.attr,
310 &class_device_attr_group_addr.attr,
262 NULL 311 NULL
263}; 312};
264 313
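The new group_addr attribute makes the bridge group (STP destination) address configurable from userspace. A hypothetical userspace sketch follows; the bridge name br0 is an assumption, and bridge attributes sit under /sys/class/net/<bridge>/bridge/:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/class/net/br0/bridge/group_addr", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* same "%x:%x:%x:%x:%x:%x" format that store_group_addr() parses;
	 * only the low nibble of the last octet may change, and 1, 2 and 3
	 * are rejected (Pause, Slow protocols, PAE) */
	if (write(fd, "1:80:c2:0:0:e", 13) < 0)
		perror("write");
	close(fd);
	return 0;
}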
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index cbd4020cc84d..997953367204 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -35,6 +35,7 @@
35#define ASSERT_READ_LOCK(x) 35#define ASSERT_READ_LOCK(x)
36#define ASSERT_WRITE_LOCK(x) 36#define ASSERT_WRITE_LOCK(x)
37#include <linux/netfilter_ipv4/listhelp.h> 37#include <linux/netfilter_ipv4/listhelp.h>
38#include <linux/mutex.h>
38 39
39#if 0 40#if 0
40/* use this for remote debugging 41/* use this for remote debugging
@@ -81,7 +82,7 @@ static void print_string(char *str)
81 82
82 83
83 84
84static DECLARE_MUTEX(ebt_mutex); 85static DEFINE_MUTEX(ebt_mutex);
85static LIST_HEAD(ebt_tables); 86static LIST_HEAD(ebt_tables);
86static LIST_HEAD(ebt_targets); 87static LIST_HEAD(ebt_targets);
87static LIST_HEAD(ebt_matches); 88static LIST_HEAD(ebt_matches);
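The file-wide locking change here is the conversion of ebt_mutex from a semaphore used as a mutex (DECLARE_MUTEX) to a real mutex (DEFINE_MUTEX from <linux/mutex.h>); the remaining hunks apply the same mapping mechanically. A sketch of the pattern, with illustrative names:

#include <linux/mutex.h>

/* DECLARE_MUTEX(m)        -> DEFINE_MUTEX(m)
 * down(&m)                -> mutex_lock(&m)
 * down_interruptible(&m)  -> mutex_lock_interruptible(&m)
 * up(&m)                  -> mutex_unlock(&m)
 */
static DEFINE_MUTEX(example_mutex);

static int example_register(void)
{
	int ret = mutex_lock_interruptible(&example_mutex);

	if (ret != 0)
		return ret;	/* interrupted by a signal */
	/* ... insert the object into a list protected by the mutex ... */
	mutex_unlock(&example_mutex);
	return 0;
}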
@@ -296,18 +297,18 @@ letscontinue:
296/* If it succeeds, returns element and locks mutex */ 297/* If it succeeds, returns element and locks mutex */
297static inline void * 298static inline void *
298find_inlist_lock_noload(struct list_head *head, const char *name, int *error, 299find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
299 struct semaphore *mutex) 300 struct mutex *mutex)
300{ 301{
301 void *ret; 302 void *ret;
302 303
303 *error = down_interruptible(mutex); 304 *error = mutex_lock_interruptible(mutex);
304 if (*error != 0) 305 if (*error != 0)
305 return NULL; 306 return NULL;
306 307
307 ret = list_named_find(head, name); 308 ret = list_named_find(head, name);
308 if (!ret) { 309 if (!ret) {
309 *error = -ENOENT; 310 *error = -ENOENT;
310 up(mutex); 311 mutex_unlock(mutex);
311 } 312 }
312 return ret; 313 return ret;
313} 314}
@@ -317,7 +318,7 @@ find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
317#else 318#else
318static void * 319static void *
319find_inlist_lock(struct list_head *head, const char *name, const char *prefix, 320find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
320 int *error, struct semaphore *mutex) 321 int *error, struct mutex *mutex)
321{ 322{
322 void *ret; 323 void *ret;
323 324
@@ -331,25 +332,25 @@ find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
331#endif 332#endif
332 333
333static inline struct ebt_table * 334static inline struct ebt_table *
334find_table_lock(const char *name, int *error, struct semaphore *mutex) 335find_table_lock(const char *name, int *error, struct mutex *mutex)
335{ 336{
336 return find_inlist_lock(&ebt_tables, name, "ebtable_", error, mutex); 337 return find_inlist_lock(&ebt_tables, name, "ebtable_", error, mutex);
337} 338}
338 339
339static inline struct ebt_match * 340static inline struct ebt_match *
340find_match_lock(const char *name, int *error, struct semaphore *mutex) 341find_match_lock(const char *name, int *error, struct mutex *mutex)
341{ 342{
342 return find_inlist_lock(&ebt_matches, name, "ebt_", error, mutex); 343 return find_inlist_lock(&ebt_matches, name, "ebt_", error, mutex);
343} 344}
344 345
345static inline struct ebt_watcher * 346static inline struct ebt_watcher *
346find_watcher_lock(const char *name, int *error, struct semaphore *mutex) 347find_watcher_lock(const char *name, int *error, struct mutex *mutex)
347{ 348{
348 return find_inlist_lock(&ebt_watchers, name, "ebt_", error, mutex); 349 return find_inlist_lock(&ebt_watchers, name, "ebt_", error, mutex);
349} 350}
350 351
351static inline struct ebt_target * 352static inline struct ebt_target *
352find_target_lock(const char *name, int *error, struct semaphore *mutex) 353find_target_lock(const char *name, int *error, struct mutex *mutex)
353{ 354{
354 return find_inlist_lock(&ebt_targets, name, "ebt_", error, mutex); 355 return find_inlist_lock(&ebt_targets, name, "ebt_", error, mutex);
355} 356}
@@ -369,10 +370,10 @@ ebt_check_match(struct ebt_entry_match *m, struct ebt_entry *e,
369 return ret; 370 return ret;
370 m->u.match = match; 371 m->u.match = match;
371 if (!try_module_get(match->me)) { 372 if (!try_module_get(match->me)) {
372 up(&ebt_mutex); 373 mutex_unlock(&ebt_mutex);
373 return -ENOENT; 374 return -ENOENT;
374 } 375 }
375 up(&ebt_mutex); 376 mutex_unlock(&ebt_mutex);
376 if (match->check && 377 if (match->check &&
377 match->check(name, hookmask, e, m->data, m->match_size) != 0) { 378 match->check(name, hookmask, e, m->data, m->match_size) != 0) {
378 BUGPRINT("match->check failed\n"); 379 BUGPRINT("match->check failed\n");
@@ -398,10 +399,10 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct ebt_entry *e,
398 return ret; 399 return ret;
399 w->u.watcher = watcher; 400 w->u.watcher = watcher;
400 if (!try_module_get(watcher->me)) { 401 if (!try_module_get(watcher->me)) {
401 up(&ebt_mutex); 402 mutex_unlock(&ebt_mutex);
402 return -ENOENT; 403 return -ENOENT;
403 } 404 }
404 up(&ebt_mutex); 405 mutex_unlock(&ebt_mutex);
405 if (watcher->check && 406 if (watcher->check &&
406 watcher->check(name, hookmask, e, w->data, w->watcher_size) != 0) { 407 watcher->check(name, hookmask, e, w->data, w->watcher_size) != 0) {
407 BUGPRINT("watcher->check failed\n"); 408 BUGPRINT("watcher->check failed\n");
@@ -638,11 +639,11 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
638 if (!target) 639 if (!target)
639 goto cleanup_watchers; 640 goto cleanup_watchers;
640 if (!try_module_get(target->me)) { 641 if (!try_module_get(target->me)) {
641 up(&ebt_mutex); 642 mutex_unlock(&ebt_mutex);
642 ret = -ENOENT; 643 ret = -ENOENT;
643 goto cleanup_watchers; 644 goto cleanup_watchers;
644 } 645 }
645 up(&ebt_mutex); 646 mutex_unlock(&ebt_mutex);
646 647
647 t->u.target = target; 648 t->u.target = target;
648 if (t->u.target == &ebt_standard_target) { 649 if (t->u.target == &ebt_standard_target) {
@@ -1015,7 +1016,7 @@ static int do_replace(void __user *user, unsigned int len)
1015 1016
1016 t->private = newinfo; 1017 t->private = newinfo;
1017 write_unlock_bh(&t->lock); 1018 write_unlock_bh(&t->lock);
1018 up(&ebt_mutex); 1019 mutex_unlock(&ebt_mutex);
1019 /* so, a user can change the chains while having messed up her counter 1020 /* so, a user can change the chains while having messed up her counter
1020 allocation. Only reason why this is done is because this way the lock 1021 allocation. Only reason why this is done is because this way the lock
1021 is held only once, while this doesn't bring the kernel into a 1022 is held only once, while this doesn't bring the kernel into a
@@ -1045,7 +1046,7 @@ static int do_replace(void __user *user, unsigned int len)
1045 return ret; 1046 return ret;
1046 1047
1047free_unlock: 1048free_unlock:
1048 up(&ebt_mutex); 1049 mutex_unlock(&ebt_mutex);
1049free_iterate: 1050free_iterate:
1050 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1051 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1051 ebt_cleanup_entry, NULL); 1052 ebt_cleanup_entry, NULL);
@@ -1068,69 +1069,69 @@ int ebt_register_target(struct ebt_target *target)
1068{ 1069{
1069 int ret; 1070 int ret;
1070 1071
1071 ret = down_interruptible(&ebt_mutex); 1072 ret = mutex_lock_interruptible(&ebt_mutex);
1072 if (ret != 0) 1073 if (ret != 0)
1073 return ret; 1074 return ret;
1074 if (!list_named_insert(&ebt_targets, target)) { 1075 if (!list_named_insert(&ebt_targets, target)) {
1075 up(&ebt_mutex); 1076 mutex_unlock(&ebt_mutex);
1076 return -EEXIST; 1077 return -EEXIST;
1077 } 1078 }
1078 up(&ebt_mutex); 1079 mutex_unlock(&ebt_mutex);
1079 1080
1080 return 0; 1081 return 0;
1081} 1082}
1082 1083
1083void ebt_unregister_target(struct ebt_target *target) 1084void ebt_unregister_target(struct ebt_target *target)
1084{ 1085{
1085 down(&ebt_mutex); 1086 mutex_lock(&ebt_mutex);
1086 LIST_DELETE(&ebt_targets, target); 1087 LIST_DELETE(&ebt_targets, target);
1087 up(&ebt_mutex); 1088 mutex_unlock(&ebt_mutex);
1088} 1089}
1089 1090
1090int ebt_register_match(struct ebt_match *match) 1091int ebt_register_match(struct ebt_match *match)
1091{ 1092{
1092 int ret; 1093 int ret;
1093 1094
1094 ret = down_interruptible(&ebt_mutex); 1095 ret = mutex_lock_interruptible(&ebt_mutex);
1095 if (ret != 0) 1096 if (ret != 0)
1096 return ret; 1097 return ret;
1097 if (!list_named_insert(&ebt_matches, match)) { 1098 if (!list_named_insert(&ebt_matches, match)) {
1098 up(&ebt_mutex); 1099 mutex_unlock(&ebt_mutex);
1099 return -EEXIST; 1100 return -EEXIST;
1100 } 1101 }
1101 up(&ebt_mutex); 1102 mutex_unlock(&ebt_mutex);
1102 1103
1103 return 0; 1104 return 0;
1104} 1105}
1105 1106
1106void ebt_unregister_match(struct ebt_match *match) 1107void ebt_unregister_match(struct ebt_match *match)
1107{ 1108{
1108 down(&ebt_mutex); 1109 mutex_lock(&ebt_mutex);
1109 LIST_DELETE(&ebt_matches, match); 1110 LIST_DELETE(&ebt_matches, match);
1110 up(&ebt_mutex); 1111 mutex_unlock(&ebt_mutex);
1111} 1112}
1112 1113
1113int ebt_register_watcher(struct ebt_watcher *watcher) 1114int ebt_register_watcher(struct ebt_watcher *watcher)
1114{ 1115{
1115 int ret; 1116 int ret;
1116 1117
1117 ret = down_interruptible(&ebt_mutex); 1118 ret = mutex_lock_interruptible(&ebt_mutex);
1118 if (ret != 0) 1119 if (ret != 0)
1119 return ret; 1120 return ret;
1120 if (!list_named_insert(&ebt_watchers, watcher)) { 1121 if (!list_named_insert(&ebt_watchers, watcher)) {
1121 up(&ebt_mutex); 1122 mutex_unlock(&ebt_mutex);
1122 return -EEXIST; 1123 return -EEXIST;
1123 } 1124 }
1124 up(&ebt_mutex); 1125 mutex_unlock(&ebt_mutex);
1125 1126
1126 return 0; 1127 return 0;
1127} 1128}
1128 1129
1129void ebt_unregister_watcher(struct ebt_watcher *watcher) 1130void ebt_unregister_watcher(struct ebt_watcher *watcher)
1130{ 1131{
1131 down(&ebt_mutex); 1132 mutex_lock(&ebt_mutex);
1132 LIST_DELETE(&ebt_watchers, watcher); 1133 LIST_DELETE(&ebt_watchers, watcher);
1133 up(&ebt_mutex); 1134 mutex_unlock(&ebt_mutex);
1134} 1135}
1135 1136
1136int ebt_register_table(struct ebt_table *table) 1137int ebt_register_table(struct ebt_table *table)
@@ -1178,7 +1179,7 @@ int ebt_register_table(struct ebt_table *table)
1178 1179
1179 table->private = newinfo; 1180 table->private = newinfo;
1180 rwlock_init(&table->lock); 1181 rwlock_init(&table->lock);
1181 ret = down_interruptible(&ebt_mutex); 1182 ret = mutex_lock_interruptible(&ebt_mutex);
1182 if (ret != 0) 1183 if (ret != 0)
1183 goto free_chainstack; 1184 goto free_chainstack;
1184 1185
@@ -1194,10 +1195,10 @@ int ebt_register_table(struct ebt_table *table)
1194 goto free_unlock; 1195 goto free_unlock;
1195 } 1196 }
1196 list_prepend(&ebt_tables, table); 1197 list_prepend(&ebt_tables, table);
1197 up(&ebt_mutex); 1198 mutex_unlock(&ebt_mutex);
1198 return 0; 1199 return 0;
1199free_unlock: 1200free_unlock:
1200 up(&ebt_mutex); 1201 mutex_unlock(&ebt_mutex);
1201free_chainstack: 1202free_chainstack:
1202 if (newinfo->chainstack) { 1203 if (newinfo->chainstack) {
1203 for_each_cpu(i) 1204 for_each_cpu(i)
@@ -1218,9 +1219,9 @@ void ebt_unregister_table(struct ebt_table *table)
1218 BUGPRINT("Request to unregister NULL table!!!\n"); 1219 BUGPRINT("Request to unregister NULL table!!!\n");
1219 return; 1220 return;
1220 } 1221 }
1221 down(&ebt_mutex); 1222 mutex_lock(&ebt_mutex);
1222 LIST_DELETE(&ebt_tables, table); 1223 LIST_DELETE(&ebt_tables, table);
1223 up(&ebt_mutex); 1224 mutex_unlock(&ebt_mutex);
1224 vfree(table->private->entries); 1225 vfree(table->private->entries);
1225 if (table->private->chainstack) { 1226 if (table->private->chainstack) {
1226 for_each_cpu(i) 1227 for_each_cpu(i)
@@ -1281,7 +1282,7 @@ static int update_counters(void __user *user, unsigned int len)
1281 write_unlock_bh(&t->lock); 1282 write_unlock_bh(&t->lock);
1282 ret = 0; 1283 ret = 0;
1283unlock_mutex: 1284unlock_mutex:
1284 up(&ebt_mutex); 1285 mutex_unlock(&ebt_mutex);
1285free_tmp: 1286free_tmp:
1286 vfree(tmp); 1287 vfree(tmp);
1287 return ret; 1288 return ret;
@@ -1328,7 +1329,7 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char *ubase)
1328 return 0; 1329 return 0;
1329} 1330}
1330 1331
1331/* called with ebt_mutex down */ 1332/* called with ebt_mutex locked */
1332static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1333static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1333 int *len, int cmd) 1334 int *len, int cmd)
1334{ 1335{
@@ -1440,7 +1441,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1440 case EBT_SO_GET_INIT_INFO: 1441 case EBT_SO_GET_INIT_INFO:
1441 if (*len != sizeof(struct ebt_replace)){ 1442 if (*len != sizeof(struct ebt_replace)){
1442 ret = -EINVAL; 1443 ret = -EINVAL;
1443 up(&ebt_mutex); 1444 mutex_unlock(&ebt_mutex);
1444 break; 1445 break;
1445 } 1446 }
1446 if (cmd == EBT_SO_GET_INFO) { 1447 if (cmd == EBT_SO_GET_INFO) {
@@ -1452,7 +1453,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1452 tmp.entries_size = t->table->entries_size; 1453 tmp.entries_size = t->table->entries_size;
1453 tmp.valid_hooks = t->table->valid_hooks; 1454 tmp.valid_hooks = t->table->valid_hooks;
1454 } 1455 }
1455 up(&ebt_mutex); 1456 mutex_unlock(&ebt_mutex);
1456 if (copy_to_user(user, &tmp, *len) != 0){ 1457 if (copy_to_user(user, &tmp, *len) != 0){
1457 BUGPRINT("c2u Didn't work\n"); 1458 BUGPRINT("c2u Didn't work\n");
1458 ret = -EFAULT; 1459 ret = -EFAULT;
@@ -1464,11 +1465,11 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1464 case EBT_SO_GET_ENTRIES: 1465 case EBT_SO_GET_ENTRIES:
1465 case EBT_SO_GET_INIT_ENTRIES: 1466 case EBT_SO_GET_INIT_ENTRIES:
1466 ret = copy_everything_to_user(t, user, len, cmd); 1467 ret = copy_everything_to_user(t, user, len, cmd);
1467 up(&ebt_mutex); 1468 mutex_unlock(&ebt_mutex);
1468 break; 1469 break;
1469 1470
1470 default: 1471 default:
1471 up(&ebt_mutex); 1472 mutex_unlock(&ebt_mutex);
1472 ret = -EINVAL; 1473 ret = -EINVAL;
1473 } 1474 }
1474 1475
@@ -1476,17 +1477,23 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1476} 1477}
1477 1478
1478static struct nf_sockopt_ops ebt_sockopts = 1479static struct nf_sockopt_ops ebt_sockopts =
1479{ { NULL, NULL }, PF_INET, EBT_BASE_CTL, EBT_SO_SET_MAX + 1, do_ebt_set_ctl, 1480{
1480 EBT_BASE_CTL, EBT_SO_GET_MAX + 1, do_ebt_get_ctl, 0, NULL 1481 .pf = PF_INET,
1482 .set_optmin = EBT_BASE_CTL,
1483 .set_optmax = EBT_SO_SET_MAX + 1,
1484 .set = do_ebt_set_ctl,
1485 .get_optmin = EBT_BASE_CTL,
1486 .get_optmax = EBT_SO_GET_MAX + 1,
1487 .get = do_ebt_get_ctl,
1481}; 1488};
1482 1489
1483static int __init init(void) 1490static int __init init(void)
1484{ 1491{
1485 int ret; 1492 int ret;
1486 1493
1487 down(&ebt_mutex); 1494 mutex_lock(&ebt_mutex);
1488 list_named_insert(&ebt_targets, &ebt_standard_target); 1495 list_named_insert(&ebt_targets, &ebt_standard_target);
1489 up(&ebt_mutex); 1496 mutex_unlock(&ebt_mutex);
1490 if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0) 1497 if ((ret = nf_register_sockopt(&ebt_sockopts)) < 0)
1491 return ret; 1498 return ret;
1492 1499