author    Patrick McHardy <kaber@trash.net>  2010-05-10 12:39:28 -0400
committer Patrick McHardy <kaber@trash.net>  2010-05-10 12:39:28 -0400
commit    1e4b1057121bc756b91758a434b504d2010f6088 (patch)
tree      b016cf2c728289c7e36d9e4e488f30ab0bd0ae6e /net/bridge
parent    3b254c54ec46eb022cb26ee6ab37fae23f5f7d6a (diff)
parent    3ee943728fff536edaf8f59faa58aaa1aa7366e3 (diff)

Merge branch 'master' of /repos/git/net-next-2.6

Conflicts:
	net/bridge/br_device.c
	net/bridge/br_forward.c

Signed-off-by: Patrick McHardy <kaber@trash.net>
Diffstat (limited to 'net/bridge')
-rw-r--r--  net/bridge/Kconfig        |   6
-rw-r--r--  net/bridge/br_device.c    |  62
-rw-r--r--  net/bridge/br_forward.c   |  39
-rw-r--r--  net/bridge/br_if.c        |  25
-rw-r--r--  net/bridge/br_multicast.c | 662
-rw-r--r--  net/bridge/br_private.h   |  32
6 files changed, 726 insertions(+), 100 deletions(-)
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index d115d5cea5b6..9190ae462cb4 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -33,14 +33,14 @@ config BRIDGE
 	  If unsure, say N.
 
 config BRIDGE_IGMP_SNOOPING
-	bool "IGMP snooping"
+	bool "IGMP/MLD snooping"
 	depends on BRIDGE
 	depends on INET
 	default y
 	---help---
 	  If you say Y here, then the Ethernet bridge will be able selectively
-	  forward multicast traffic based on IGMP traffic received from each
-	  port.
+	  forward multicast traffic based on IGMP/MLD traffic received from
+	  each port.
 
 	  Say N to exclude this support and reduce the binary size.
 
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 007bde87415d..f15f9c4a0dd2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -13,9 +13,12 @@
 
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/list.h>
 #include <linux/netfilter_bridge.h>
+
 #include <asm/uaccess.h>
 #include "br_private.h"
 
@@ -43,7 +46,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (dest[0] & 1) {
+	if (is_multicast_ether_addr(dest)) {
 		if (br_multicast_rcv(br, NULL, skb))
 			goto out;
 
@@ -195,6 +198,59 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+bool br_devices_support_netpoll(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	bool ret = true;
+	int count = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&br->lock, flags);
+	list_for_each_entry(p, &br->port_list, list) {
+		count++;
+		if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+		    !p->dev->netdev_ops->ndo_poll_controller)
+			ret = false;
+	}
+	spin_unlock_irqrestore(&br->lock, flags);
+	return count != 0 && ret;
+}
+
+static void br_poll_controller(struct net_device *br_dev)
+{
+	struct netpoll *np = br_dev->npinfo->netpoll;
+
+	if (np->real_dev != br_dev)
+		netpoll_poll_dev(np->real_dev);
+}
+
+void br_netpoll_cleanup(struct net_device *br_dev)
+{
+	struct net_bridge *br = netdev_priv(br_dev);
+	struct net_bridge_port *p, *n;
+	const struct net_device_ops *ops;
+
+	br->dev->npinfo = NULL;
+	list_for_each_entry_safe(p, n, &br->port_list, list) {
+		if (p->dev) {
+			ops = p->dev->netdev_ops;
+			if (ops->ndo_netpoll_cleanup)
+				ops->ndo_netpoll_cleanup(p->dev);
+			else
+				p->dev->npinfo = NULL;
+		}
+	}
+}
+
+#else
+
+void br_netpoll_cleanup(struct net_device *br_dev)
+{
+}
+
+#endif
+
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo = br_getinfo,
 	.get_link = ethtool_op_get_link,
@@ -218,6 +274,10 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_set_multicast_list = br_dev_set_multicast_list,
 	.ndo_change_mtu = br_change_mtu,
 	.ndo_do_ioctl = br_dev_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_cleanup = br_netpoll_cleanup,
+	.ndo_poll_controller = br_poll_controller,
+#endif
 };
 
 static void br_dev_free(struct net_device *dev)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 92fb3293a215..a98ef1393097 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -15,6 +15,7 @@
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 #include <linux/skbuff.h>
 #include <linux/if_vlan.h>
 #include <linux/netfilter_bridge.h>
@@ -50,7 +51,13 @@ int br_dev_queue_push_xmit(struct sk_buff *skb)
 	else {
 		skb_push(skb, ETH_HLEN);
 
-		dev_queue_xmit(skb);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		if (unlikely(skb->dev->priv_flags & IFF_IN_NETPOLL)) {
+			netpoll_send_skb(skb->dev->npinfo->netpoll, skb);
+			skb->dev->priv_flags &= ~IFF_IN_NETPOLL;
+		} else
+#endif
+		dev_queue_xmit(skb);
 	}
 }
 
@@ -66,9 +73,23 @@ int br_forward_finish(struct sk_buff *skb)
 
 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
 {
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	struct net_bridge *br = to->br;
+	if (unlikely(br->dev->priv_flags & IFF_IN_NETPOLL)) {
+		struct netpoll *np;
+		to->dev->npinfo = skb->dev->npinfo;
+		np = skb->dev->npinfo->netpoll;
+		np->real_dev = np->dev = to->dev;
+		to->dev->priv_flags |= IFF_IN_NETPOLL;
+	}
+#endif
 	skb->dev = to->dev;
 	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
 		br_forward_finish);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (skb->dev->npinfo)
+		skb->dev->npinfo->netpoll->dev = br->dev;
+#endif
 }
 
 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
@@ -208,17 +229,15 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 {
 	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
 	struct net_bridge *br = netdev_priv(dev);
-	struct net_bridge_port *port;
-	struct net_bridge_port *lport, *rport;
-	struct net_bridge_port *prev;
+	struct net_bridge_port *prev = NULL;
 	struct net_bridge_port_group *p;
 	struct hlist_node *rp;
 
-	prev = NULL;
-
-	rp = br->router_list.first;
-	p = mdst ? mdst->ports : NULL;
+	rp = rcu_dereference(br->router_list.first);
+	p = mdst ? rcu_dereference(mdst->ports) : NULL;
 	while (p || rp) {
+		struct net_bridge_port *port, *lport, *rport;
+
 		lport = p ? p->port : NULL;
 		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
 			     NULL;
@@ -231,9 +250,9 @@ static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
 			goto out;
 
 		if ((unsigned long)lport >= (unsigned long)port)
-			p = p->next;
+			p = rcu_dereference(p->next);
 		if ((unsigned long)rport >= (unsigned long)port)
-			rp = rp->next;
+			rp = rcu_dereference(rp->next);
 	}
 
 	if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 521439333316..537bdd60d9b9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -13,6 +13,7 @@
 
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 #include <linux/ethtool.h>
 #include <linux/if_arp.h>
 #include <linux/module.h>
@@ -153,6 +154,14 @@ static void del_nbp(struct net_bridge_port *p)
 	kobject_uevent(&p->kobj, KOBJ_REMOVE);
 	kobject_del(&p->kobj);
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (br_devices_support_netpoll(br))
+		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+	if (dev->netdev_ops->ndo_netpoll_cleanup)
+		dev->netdev_ops->ndo_netpoll_cleanup(dev);
+	else
+		dev->npinfo = NULL;
+#endif
 	call_rcu(&p->rcu, destroy_nbp_rcu);
 }
 
@@ -165,6 +174,8 @@ static void del_br(struct net_bridge *br, struct list_head *head)
 		del_nbp(p);
 	}
 
+	br_netpoll_cleanup(br->dev);
+
 	del_timer_sync(&br->gc_timer);
 
 	br_sysfs_delbr(br->dev);
@@ -444,6 +455,20 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 
 	kobject_uevent(&p->kobj, KOBJ_ADD);
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	if (br_devices_support_netpoll(br)) {
+		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+		if (br->dev->npinfo)
+			dev->npinfo = br->dev->npinfo;
+	} else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+		br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
+		printk(KERN_INFO "New device %s does not support netpoll\n",
+		       dev->name);
+		printk(KERN_INFO "Disabling netpoll for %s\n",
+		       br->dev->name);
+	}
+#endif
+
 	return 0;
 err2:
 	br_fdb_delete_by_port(br, p, 1);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 8ccdb8ee3928..c8419e240316 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -24,51 +24,139 @@
 #include <linux/slab.h>
 #include <linux/timer.h>
 #include <net/ip.h>
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+#include <net/ipv6.h>
+#include <net/mld.h>
+#include <net/addrconf.h>
+#include <net/ip6_checksum.h>
+#endif
 
 #include "br_private.h"
 
-static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
 {
-	return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
+	if (ipv6_addr_is_multicast(addr) &&
+	    IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
+		return 1;
+	return 0;
+}
+#endif
+
+static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
+{
+	if (a->proto != b->proto)
+		return 0;
+	switch (a->proto) {
+	case htons(ETH_P_IP):
+		return a->u.ip4 == b->u.ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return ipv6_addr_equal(&a->u.ip6, &b->u.ip6);
+#endif
+	}
+	return 0;
+}
+
+static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
+{
+	return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb,
+				const struct in6_addr *ip)
+{
+	return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1);
+}
+#endif
+
+static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb,
+			     struct br_ip *ip)
+{
+	switch (ip->proto) {
+	case htons(ETH_P_IP):
+		return __br_ip4_hash(mdb, ip->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return __br_ip6_hash(mdb, &ip->u.ip6);
+#endif
+	}
+	return 0;
 }
 
 static struct net_bridge_mdb_entry *__br_mdb_ip_get(
-	struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst, int hash)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
 
 	hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
-		if (dst == mp->addr)
+		if (br_ip_equal(&mp->addr, dst))
 			return mp;
 	}
 
 	return NULL;
 }
 
-static struct net_bridge_mdb_entry *br_mdb_ip_get(
+static struct net_bridge_mdb_entry *br_mdb_ip4_get(
 	struct net_bridge_mdb_htable *mdb, __be32 dst)
 {
-	if (!mdb)
-		return NULL;
+	struct br_ip br_dst;
+
+	br_dst.u.ip4 = dst;
+	br_dst.proto = htons(ETH_P_IP);
+
+	return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst));
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct net_bridge_mdb_entry *br_mdb_ip6_get(
+	struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
+{
+	struct br_ip br_dst;
 
+	ipv6_addr_copy(&br_dst.u.ip6, dst);
+	br_dst.proto = htons(ETH_P_IPV6);
+
+	return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst));
+}
+#endif
+
+static struct net_bridge_mdb_entry *br_mdb_ip_get(
+	struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
+{
 	return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
 }
 
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
-	if (br->multicast_disabled)
+	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct br_ip ip;
+
+	if (!mdb || br->multicast_disabled)
+		return NULL;
+
+	if (BR_INPUT_SKB_CB(skb)->igmp)
 		return NULL;
 
+	ip.proto = skb->protocol;
+
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
-		if (BR_INPUT_SKB_CB(skb)->igmp)
-			break;
-		return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
+		ip.u.ip4 = ip_hdr(skb)->daddr;
+		break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
+		break;
+#endif
+	default:
+		return NULL;
 	}
 
-	return NULL;
+	return br_mdb_ip_get(mdb, &ip);
 }
 
 static void br_mdb_free(struct rcu_head *head)
@@ -95,7 +183,7 @@ static int br_mdb_copy(struct net_bridge_mdb_htable *new,
 	for (i = 0; i < old->max; i++)
 		hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
 			hlist_add_head(&mp->hlist[new->ver],
-				       &new->mhash[br_ip_hash(new, mp->addr)]);
+				       &new->mhash[br_ip_hash(new, &mp->addr)]);
 
 	if (!elasticity)
 		return 0;
@@ -163,7 +251,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
 	struct net_bridge_port_group *p;
 	struct net_bridge_port_group **pp;
 
-	mp = br_mdb_ip_get(mdb, pg->addr);
+	mp = br_mdb_ip_get(mdb, &pg->addr);
 	if (WARN_ON(!mp))
 		return;
 
@@ -171,7 +259,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
 		if (p != pg)
 			continue;
 
-		*pp = p->next;
+		rcu_assign_pointer(*pp, p->next);
 		hlist_del_init(&p->mglist);
 		del_timer(&p->timer);
 		del_timer(&p->query_timer);
@@ -249,8 +337,8 @@ out:
 	return 0;
 }
 
-static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
-						__be32 group)
+static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
+						    __be32 group)
 {
 	struct sk_buff *skb;
 	struct igmphdr *ih;
@@ -314,12 +402,104 @@ out:
 	return skb;
 }
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
+						    struct in6_addr *group)
+{
+	struct sk_buff *skb;
+	struct ipv6hdr *ip6h;
+	struct mld_msg *mldq;
+	struct ethhdr *eth;
+	u8 *hopopt;
+	unsigned long interval;
+
+	skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*ip6h) +
+						 8 + sizeof(*mldq));
+	if (!skb)
+		goto out;
+
+	skb->protocol = htons(ETH_P_IPV6);
+
+	/* Ethernet header */
+	skb_reset_mac_header(skb);
+	eth = eth_hdr(skb);
+
+	memcpy(eth->h_source, br->dev->dev_addr, 6);
+	ipv6_eth_mc_map(group, eth->h_dest);
+	eth->h_proto = htons(ETH_P_IPV6);
+	skb_put(skb, sizeof(*eth));
+
+	/* IPv6 header + HbH option */
+	skb_set_network_header(skb, skb->len);
+	ip6h = ipv6_hdr(skb);
+
+	*(__force __be32 *)ip6h = htonl(0x60000000);
+	ip6h->payload_len = 8 + sizeof(*mldq);
+	ip6h->nexthdr = IPPROTO_HOPOPTS;
+	ip6h->hop_limit = 1;
+	ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0);
+	ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
+
+	hopopt = (u8 *)(ip6h + 1);
+	hopopt[0] = IPPROTO_ICMPV6;		/* next hdr */
+	hopopt[1] = 0;				/* length of HbH */
+	hopopt[2] = IPV6_TLV_ROUTERALERT;	/* Router Alert */
+	hopopt[3] = 2;				/* Length of RA Option */
+	hopopt[4] = 0;				/* Type = 0x0000 (MLD) */
+	hopopt[5] = 0;
+	hopopt[6] = IPV6_TLV_PAD0;		/* Pad0 */
+	hopopt[7] = IPV6_TLV_PAD0;		/* Pad0 */
+
+	skb_put(skb, sizeof(*ip6h) + 8);
+
+	/* ICMPv6 */
+	skb_set_transport_header(skb, skb->len);
+	mldq = (struct mld_msg *) icmp6_hdr(skb);
+
+	interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
+					  br->multicast_query_response_interval;
+
+	mldq->mld_type = ICMPV6_MGM_QUERY;
+	mldq->mld_code = 0;
+	mldq->mld_cksum = 0;
+	mldq->mld_maxdelay = htons((u16)jiffies_to_msecs(interval));
+	mldq->mld_reserved = 0;
+	ipv6_addr_copy(&mldq->mld_mca, group);
+
+	/* checksum */
+	mldq->mld_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+					  sizeof(*mldq), IPPROTO_ICMPV6,
+					  csum_partial(mldq,
+						       sizeof(*mldq), 0));
+	skb_put(skb, sizeof(*mldq));
+
+	__skb_pull(skb, sizeof(*eth));
+
+out:
+	return skb;
+}
+#endif
+
+static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
+						struct br_ip *addr)
+{
+	switch (addr->proto) {
+	case htons(ETH_P_IP):
+		return br_ip4_multicast_alloc_query(br, addr->u.ip4);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
+#endif
+	}
+	return NULL;
+}
+
 static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
 {
 	struct net_bridge *br = mp->br;
 	struct sk_buff *skb;
 
-	skb = br_multicast_alloc_query(br, mp->addr);
+	skb = br_multicast_alloc_query(br, &mp->addr);
 	if (!skb)
 		goto timer;
 
@@ -353,7 +533,7 @@ static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
 	struct net_bridge *br = port->br;
 	struct sk_buff *skb;
 
-	skb = br_multicast_alloc_query(br, pg->addr);
+	skb = br_multicast_alloc_query(br, &pg->addr);
 	if (!skb)
 		goto timer;
 
@@ -383,8 +563,8 @@ out:
 }
 
 static struct net_bridge_mdb_entry *br_multicast_get_group(
-	struct net_bridge *br, struct net_bridge_port *port, __be32 group,
-	int hash)
+	struct net_bridge *br, struct net_bridge_port *port,
+	struct br_ip *group, int hash)
 {
 	struct net_bridge_mdb_htable *mdb = br->mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -396,9 +576,8 @@ static struct net_bridge_mdb_entry *br_multicast_get_group(
 
 	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
 		count++;
-		if (unlikely(group == mp->addr)) {
+		if (unlikely(br_ip_equal(group, &mp->addr)))
 			return mp;
-		}
 	}
 
 	elasticity = 0;
@@ -463,7 +642,8 @@ err:
 }
 
 static struct net_bridge_mdb_entry *br_multicast_new_group(
-	struct net_bridge *br, struct net_bridge_port *port, __be32 group)
+	struct net_bridge *br, struct net_bridge_port *port,
+	struct br_ip *group)
 {
 	struct net_bridge_mdb_htable *mdb = br->mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -496,7 +676,7 @@ rehash:
 		goto out;
 
 	mp->br = br;
-	mp->addr = group;
+	mp->addr = *group;
 	setup_timer(&mp->timer, br_multicast_group_expired,
 		    (unsigned long)mp);
 	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
@@ -510,7 +690,8 @@ out:
 }
 
 static int br_multicast_add_group(struct net_bridge *br,
-				  struct net_bridge_port *port, __be32 group)
+				  struct net_bridge_port *port,
+				  struct br_ip *group)
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
@@ -518,9 +699,6 @@ static int br_multicast_add_group(struct net_bridge *br,
 	unsigned long now = jiffies;
 	int err;
 
-	if (ipv4_is_local_multicast(group))
-		return 0;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED))
@@ -549,7 +727,7 @@ static int br_multicast_add_group(struct net_bridge *br,
 	if (unlikely(!p))
 		goto err;
 
-	p->addr = group;
+	p->addr = *group;
 	p->port = port;
 	p->next = *pp;
 	hlist_add_head(&p->mglist, &port->mglist);
@@ -570,6 +748,38 @@ err:
 	return err;
 }
 
+static int br_ip4_multicast_add_group(struct net_bridge *br,
+				      struct net_bridge_port *port,
+				      __be32 group)
+{
+	struct br_ip br_group;
+
+	if (ipv4_is_local_multicast(group))
+		return 0;
+
+	br_group.u.ip4 = group;
+	br_group.proto = htons(ETH_P_IP);
+
+	return br_multicast_add_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_add_group(struct net_bridge *br,
+				      struct net_bridge_port *port,
+				      const struct in6_addr *group)
+{
+	struct br_ip br_group;
+
+	if (ipv6_is_local_multicast(group))
+		return 0;
+
+	ipv6_addr_copy(&br_group.u.ip6, group);
+	br_group.proto = htons(ETH_P_IP);
+
+	return br_multicast_add_group(br, port, &br_group);
+}
+#endif
+
 static void br_multicast_router_expired(unsigned long data)
 {
 	struct net_bridge_port *port = (void *)data;
@@ -591,19 +801,15 @@ static void br_multicast_local_router_expired(unsigned long data)
 {
 }
 
-static void br_multicast_send_query(struct net_bridge *br,
-				    struct net_bridge_port *port, u32 sent)
+static void __br_multicast_send_query(struct net_bridge *br,
+				      struct net_bridge_port *port,
+				      struct br_ip *ip)
 {
-	unsigned long time;
 	struct sk_buff *skb;
 
-	if (!netif_running(br->dev) || br->multicast_disabled ||
-	    timer_pending(&br->multicast_querier_timer))
-		return;
-
-	skb = br_multicast_alloc_query(br, 0);
+	skb = br_multicast_alloc_query(br, ip);
 	if (!skb)
-		goto timer;
+		return;
 
 	if (port) {
 		__skb_push(skb, sizeof(struct ethhdr));
@@ -612,8 +818,28 @@ static void br_multicast_send_query(struct net_bridge *br,
 			dev_queue_xmit);
 	} else
 		netif_rx(skb);
+}
+
+static void br_multicast_send_query(struct net_bridge *br,
+				    struct net_bridge_port *port, u32 sent)
+{
+	unsigned long time;
+	struct br_ip br_group;
+
+	if (!netif_running(br->dev) || br->multicast_disabled ||
+	    timer_pending(&br->multicast_querier_timer))
+		return;
+
+	memset(&br_group.u, 0, sizeof(br_group.u));
+
+	br_group.proto = htons(ETH_P_IP);
+	__br_multicast_send_query(br, port, &br_group);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	br_group.proto = htons(ETH_P_IPV6);
+	__br_multicast_send_query(br, port, &br_group);
+#endif
 
-timer:
 	time = jiffies;
 	time += sent < br->multicast_startup_query_count ?
 		br->multicast_startup_query_interval :
@@ -698,9 +924,9 @@ void br_multicast_disable_port(struct net_bridge_port *port)
 	spin_unlock(&br->multicast_lock);
 }
 
-static int br_multicast_igmp3_report(struct net_bridge *br,
-				     struct net_bridge_port *port,
-				     struct sk_buff *skb)
+static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
+					 struct net_bridge_port *port,
+					 struct sk_buff *skb)
 {
 	struct igmpv3_report *ih;
 	struct igmpv3_grec *grec;
@@ -727,7 +953,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
 		group = grec->grec_mca;
 		type = grec->grec_type;
 
-		len += grec->grec_nsrcs * 4;
+		len += ntohs(grec->grec_nsrcs) * 4;
 		if (!pskb_may_pull(skb, len))
 			return -EINVAL;
 
@@ -745,7 +971,7 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
 			continue;
 		}
 
-		err = br_multicast_add_group(br, port, group);
+		err = br_ip4_multicast_add_group(br, port, group);
 		if (err)
 			break;
 	}
@@ -753,24 +979,87 @@ static int br_multicast_igmp3_report(struct net_bridge *br,
 	return err;
 }
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_mld2_report(struct net_bridge *br,
+					struct net_bridge_port *port,
+					struct sk_buff *skb)
+{
+	struct icmp6hdr *icmp6h;
+	struct mld2_grec *grec;
+	int i;
+	int len;
+	int num;
+	int err = 0;
+
+	if (!pskb_may_pull(skb, sizeof(*icmp6h)))
+		return -EINVAL;
+
+	icmp6h = icmp6_hdr(skb);
+	num = ntohs(icmp6h->icmp6_dataun.un_data16[1]);
+	len = sizeof(*icmp6h);
+
+	for (i = 0; i < num; i++) {
+		__be16 *nsrcs, _nsrcs;
+
+		nsrcs = skb_header_pointer(skb,
+					   len + offsetof(struct mld2_grec,
+							  grec_mca),
+					   sizeof(_nsrcs), &_nsrcs);
+		if (!nsrcs)
+			return -EINVAL;
+
+		if (!pskb_may_pull(skb,
+				   len + sizeof(*grec) +
+				   sizeof(struct in6_addr) * (*nsrcs)))
+			return -EINVAL;
+
+		grec = (struct mld2_grec *)(skb->data + len);
+		len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs);
+
+		/* We treat these as MLDv1 reports for now. */
+		switch (grec->grec_type) {
+		case MLD2_MODE_IS_INCLUDE:
+		case MLD2_MODE_IS_EXCLUDE:
+		case MLD2_CHANGE_TO_INCLUDE:
+		case MLD2_CHANGE_TO_EXCLUDE:
+		case MLD2_ALLOW_NEW_SOURCES:
+		case MLD2_BLOCK_OLD_SOURCES:
+			break;
+
+		default:
+			continue;
+		}
+
+		err = br_ip6_multicast_add_group(br, port, &grec->grec_mca);
+		if (!err)
+			break;
+	}
+
+	return err;
+}
+#endif
+
+/*
+ * Add port to rotuer_list
+ *  list is maintained ordered by pointer value
+ *  and locked by br->multicast_lock and RCU
+ */
 static void br_multicast_add_router(struct net_bridge *br,
 				    struct net_bridge_port *port)
 {
-	struct hlist_node *p;
-	struct hlist_node **h;
+	struct net_bridge_port *p;
+	struct hlist_node *n, *slot = NULL;
 
-	for (h = &br->router_list.first;
-	     (p = *h) &&
-	     (unsigned long)container_of(p, struct net_bridge_port, rlist) >
-	     (unsigned long)port;
-	     h = &p->next)
-		;
-
-	port->rlist.pprev = h;
-	port->rlist.next = p;
-	rcu_assign_pointer(*h, &port->rlist);
-	if (p)
-		p->pprev = &port->rlist.next;
+	hlist_for_each_entry(p, n, &br->router_list, rlist) {
+		if ((unsigned long) port >= (unsigned long) p)
+			break;
+		slot = n;
+	}
+
+	if (slot)
+		hlist_add_after_rcu(slot, &port->rlist);
+	else
+		hlist_add_head_rcu(&port->rlist, &br->router_list);
 }
 
 static void br_multicast_mark_router(struct net_bridge *br,
@@ -800,7 +1089,7 @@ timer:
 
 static void br_multicast_query_received(struct net_bridge *br,
 					struct net_bridge_port *port,
-					__be32 saddr)
+					int saddr)
 {
 	if (saddr)
 		mod_timer(&br->multicast_querier_timer,
@@ -811,9 +1100,9 @@ static void br_multicast_query_received(struct net_bridge *br,
 		br_multicast_mark_router(br, port);
 }
 
-static int br_multicast_query(struct net_bridge *br,
-			      struct net_bridge_port *port,
-			      struct sk_buff *skb)
+static int br_ip4_multicast_query(struct net_bridge *br,
+				  struct net_bridge_port *port,
+				  struct sk_buff *skb)
 {
 	struct iphdr *iph = ip_hdr(skb);
 	struct igmphdr *ih = igmp_hdr(skb);
@@ -831,7 +1120,7 @@ static int br_multicast_query(struct net_bridge *br,
 	    (port && port->state == BR_STATE_DISABLED))
 		goto out;
 
-	br_multicast_query_received(br, port, iph->saddr);
+	br_multicast_query_received(br, port, !!iph->saddr);
 
 	group = ih->group;
 
@@ -859,7 +1148,7 @@ static int br_multicast_query(struct net_bridge *br,
 	if (!group)
 		goto out;
 
-	mp = br_mdb_ip_get(br->mdb, group);
+	mp = br_mdb_ip4_get(br->mdb, group);
 	if (!mp)
 		goto out;
 
@@ -883,9 +1172,78 @@ out:
 	return err;
 }
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_ip6_multicast_query(struct net_bridge *br,
+				  struct net_bridge_port *port,
+				  struct sk_buff *skb)
+{
+	struct ipv6hdr *ip6h = ipv6_hdr(skb);
+	struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
+	struct net_bridge_mdb_entry *mp;
+	struct mld2_query *mld2q;
+	struct net_bridge_port_group *p, **pp;
+	unsigned long max_delay;
+	unsigned long now = jiffies;
+	struct in6_addr *group = NULL;
+	int err = 0;
+
+	spin_lock(&br->multicast_lock);
+	if (!netif_running(br->dev) ||
+	    (port && port->state == BR_STATE_DISABLED))
+		goto out;
+
+	br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr));
+
+	if (skb->len == sizeof(*mld)) {
+		if (!pskb_may_pull(skb, sizeof(*mld))) {
+			err = -EINVAL;
+			goto out;
+		}
+		mld = (struct mld_msg *) icmp6_hdr(skb);
+		max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay));
+		if (max_delay)
+			group = &mld->mld_mca;
+	} else if (skb->len >= sizeof(*mld2q)) {
+		if (!pskb_may_pull(skb, sizeof(*mld2q))) {
+			err = -EINVAL;
+			goto out;
+		}
+		mld2q = (struct mld2_query *)icmp6_hdr(skb);
+		if (!mld2q->mld2q_nsrcs)
+			group = &mld2q->mld2q_mca;
+		max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1;
+	}
+
+	if (!group)
+		goto out;
+
+	mp = br_mdb_ip6_get(br->mdb, group);
+	if (!mp)
+		goto out;
+
+	max_delay *= br->multicast_last_member_count;
+	if (!hlist_unhashed(&mp->mglist) &&
+	    (timer_pending(&mp->timer) ?
+	     time_after(mp->timer.expires, now + max_delay) :
+	     try_to_del_timer_sync(&mp->timer) >= 0))
+		mod_timer(&mp->timer, now + max_delay);
+
+	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+		if (timer_pending(&p->timer) ?
+		    time_after(p->timer.expires, now + max_delay) :
+		    try_to_del_timer_sync(&p->timer) >= 0)
+			mod_timer(&mp->timer, now + max_delay);
+	}
+
+out:
+	spin_unlock(&br->multicast_lock);
+	return err;
+}
+#endif
+
 static void br_multicast_leave_group(struct net_bridge *br,
 				     struct net_bridge_port *port,
-				     __be32 group)
+				     struct br_ip *group)
 {
 	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
@@ -893,9 +1251,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
 	unsigned long now;
 	unsigned long time;
 
-	if (ipv4_is_local_multicast(group))
-		return;
-
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev) ||
 	    (port && port->state == BR_STATE_DISABLED) ||
@@ -946,6 +1301,38 @@ out:
 	spin_unlock(&br->multicast_lock);
 }
 
+static void br_ip4_multicast_leave_group(struct net_bridge *br,
+					 struct net_bridge_port *port,
+					 __be32 group)
+{
+	struct br_ip br_group;
+
+	if (ipv4_is_local_multicast(group))
+		return;
+
+	br_group.u.ip4 = group;
+	br_group.proto = htons(ETH_P_IP);
+
+	br_multicast_leave_group(br, port, &br_group);
+}
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static void br_ip6_multicast_leave_group(struct net_bridge *br,
+					 struct net_bridge_port *port,
+					 const struct in6_addr *group)
+{
+	struct br_ip br_group;
+
+	if (ipv6_is_local_multicast(group))
+		return;
+
+	ipv6_addr_copy(&br_group.u.ip6, group);
+	br_group.proto = htons(ETH_P_IPV6);
+
+	br_multicast_leave_group(br, port, &br_group);
+}
+#endif
+
 static int br_multicast_ipv4_rcv(struct net_bridge *br,
 				 struct net_bridge_port *port,
 				 struct sk_buff *skb)
@@ -957,9 +1344,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	unsigned offset;
 	int err;
 
-	BR_INPUT_SKB_CB(skb)->igmp = 0;
-	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
-
 	/* We treat OOM as packet loss for now. */
 	if (!pskb_may_pull(skb, sizeof(*iph)))
 		return -EINVAL;
@@ -1023,16 +1407,16 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
 	case IGMP_HOST_MEMBERSHIP_REPORT:
 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
 		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
-		err = br_multicast_add_group(br, port, ih->group);
+		err = br_ip4_multicast_add_group(br, port, ih->group);
 		break;
 	case IGMPV3_HOST_MEMBERSHIP_REPORT:
-		err = br_multicast_igmp3_report(br, port, skb2);
+		err = br_ip4_multicast_igmp3_report(br, port, skb2);
 		break;
 	case IGMP_HOST_MEMBERSHIP_QUERY:
-		err = br_multicast_query(br, port, skb2);
+		err = br_ip4_multicast_query(br, port, skb2);
 		break;
 	case IGMP_HOST_LEAVE_MESSAGE:
-		br_multicast_leave_group(br, port, ih->group);
+		br_ip4_multicast_leave_group(br, port, ih->group);
 		break;
 	}
 
@@ -1044,15 +1428,139 @@ err_out:
 	return err;
 }
 
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+static int br_multicast_ipv6_rcv(struct net_bridge *br,
+				 struct net_bridge_port *port,
+				 struct sk_buff *skb)
+{
+	struct sk_buff *skb2 = skb;
+	struct ipv6hdr *ip6h;
+	struct icmp6hdr *icmp6h;
+	u8 nexthdr;
+	unsigned len;
+	unsigned offset;
+	int err;
+
+	if (!pskb_may_pull(skb, sizeof(*ip6h)))
+		return -EINVAL;
+
+	ip6h = ipv6_hdr(skb);
+
+	/*
+	 * We're interested in MLD messages only.
+	 *  - Version is 6
+	 *  - MLD has always Router Alert hop-by-hop option
+	 *  - But we do not support jumbrograms.
+	 */
+	if (ip6h->version != 6 ||
+	    ip6h->nexthdr != IPPROTO_HOPOPTS ||
+	    ip6h->payload_len == 0)
+		return 0;
+
+	len = ntohs(ip6h->payload_len);
+	if (skb->len < len)
+		return -EINVAL;
+
+	nexthdr = ip6h->nexthdr;
+	offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
+
+	if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
+		return 0;
+
+	/* Okay, we found ICMPv6 header */
+	skb2 = skb_clone(skb, GFP_ATOMIC);
+	if (!skb2)
+		return -ENOMEM;
+
+	len -= offset - skb_network_offset(skb2);
+
+	__skb_pull(skb2, offset);
+	skb_reset_transport_header(skb2);
+
+	err = -EINVAL;
+	if (!pskb_may_pull(skb2, sizeof(*icmp6h)))
+		goto out;
+
+	icmp6h = icmp6_hdr(skb2);
+
+	switch (icmp6h->icmp6_type) {
+	case ICMPV6_MGM_QUERY:
+	case ICMPV6_MGM_REPORT:
+	case ICMPV6_MGM_REDUCTION:
+	case ICMPV6_MLD2_REPORT:
+		break;
+	default:
+		err = 0;
+		goto out;
+	}
+
+	/* Okay, we found MLD message. Check further. */
+	if (skb2->len > len) {
+		err = pskb_trim_rcsum(skb2, len);
+		if (err)
+			goto out;
+	}
+
+	switch (skb2->ip_summed) {
+	case CHECKSUM_COMPLETE:
+		if (!csum_fold(skb2->csum))
+			break;
+		/*FALLTHROUGH*/
+	case CHECKSUM_NONE:
+		skb2->csum = 0;
+		if (skb_checksum_complete(skb2))
+			goto out;
+	}
+
+	err = 0;
+
+	BR_INPUT_SKB_CB(skb)->igmp = 1;
+
+	switch (icmp6h->icmp6_type) {
+	case ICMPV6_MGM_REPORT:
+	    {
+		struct mld_msg *mld = (struct mld_msg *)icmp6h;
+		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
+		err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
+		break;
+	    }
+	case ICMPV6_MLD2_REPORT:
+		err = br_ip6_multicast_mld2_report(br, port, skb2);
+		break;
+	case ICMPV6_MGM_QUERY:
+		err = br_ip6_multicast_query(br, port, skb2);
+		break;
+	case ICMPV6_MGM_REDUCTION:
+	    {
+		struct mld_msg *mld = (struct mld_msg *)icmp6h;
+		br_ip6_multicast_leave_group(br, port, &mld->mld_mca);
+	    }
+	}
+
+out:
+	__skb_push(skb2, offset);
+	if (skb2 != skb)
+		kfree_skb(skb2);
+	return err;
+}
+#endif
+
 int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
 		     struct sk_buff *skb)
 {
+	BR_INPUT_SKB_CB(skb)->igmp = 0;
+	BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
+
 	if (br->multicast_disabled)
 		return 0;
 
 	switch (skb->protocol) {
 	case htons(ETH_P_IP):
 		return br_multicast_ipv4_rcv(br, port, skb);
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+	case htons(ETH_P_IPV6):
+		return br_multicast_ipv6_rcv(br, port, skb);
+#endif
 	}
 
 	return 0;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 791d4ab0fd4d..3d2d3fe0a97e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -45,6 +45,17 @@ struct mac_addr
 	unsigned char addr[6];
 };
 
+struct br_ip
+{
+	union {
+		__be32	ip4;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+		struct in6_addr ip6;
+#endif
+	} u;
+	__be16		proto;
+};
+
 struct net_bridge_fdb_entry
 {
 	struct hlist_node		hlist;
@@ -64,7 +75,7 @@ struct net_bridge_port_group {
 	struct rcu_head			rcu;
 	struct timer_list		timer;
 	struct timer_list		query_timer;
-	__be32				addr;
+	struct br_ip			addr;
 	u32				queries_sent;
 };
 
@@ -77,7 +88,7 @@ struct net_bridge_mdb_entry
 	struct rcu_head			rcu;
 	struct timer_list		timer;
 	struct timer_list		query_timer;
-	__be32				addr;
+	struct br_ip			addr;
 	u32				queries_sent;
 };
 
@@ -130,19 +141,20 @@ struct net_bridge_port
 #endif
 };
 
+struct br_cpu_netstats {
+	unsigned long	rx_packets;
+	unsigned long	rx_bytes;
+	unsigned long	tx_packets;
+	unsigned long	tx_bytes;
+};
+
 struct net_bridge
 {
 	spinlock_t			lock;
 	struct list_head		port_list;
 	struct net_device		*dev;
 
-	struct br_cpu_netstats __percpu {
-		unsigned long	rx_packets;
-		unsigned long	rx_bytes;
-		unsigned long	tx_packets;
-		unsigned long	tx_bytes;
-	} *stats;
-
+	struct br_cpu_netstats __percpu *stats;
 	spinlock_t			hash_lock;
 	struct hlist_head		hash[BR_HASH_SIZE];
 	unsigned long			feature_mask;
@@ -241,6 +253,8 @@ static inline int br_is_root_bridge(const struct net_bridge *br)
 extern void br_dev_setup(struct net_device *dev);
 extern netdev_tx_t br_dev_xmit(struct sk_buff *skb,
 			       struct net_device *dev);
+extern bool br_devices_support_netpoll(struct net_bridge *br);
+extern void br_netpoll_cleanup(struct net_device *br_dev);
 
 /* br_fdb.c */
 extern int br_fdb_init(void);