aboutsummaryrefslogtreecommitdiffstats
path: root/net/bridge
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /net/bridge
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-masterarchived-private-master
Diffstat (limited to 'net/bridge')
-rw-r--r--net/bridge/Kconfig14
-rw-r--r--net/bridge/Makefile2
-rw-r--r--net/bridge/br_device.c24
-rw-r--r--net/bridge/br_fdb.c5
-rw-r--r--net/bridge/br_forward.c167
-rw-r--r--net/bridge/br_if.c32
-rw-r--r--net/bridge/br_input.c42
-rw-r--r--net/bridge/br_ioctl.c5
-rw-r--r--net/bridge/br_multicast.c1309
-rw-r--r--net/bridge/br_netfilter.c7
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/br_private.h185
-rw-r--r--net/bridge/br_stp.c2
-rw-r--r--net/bridge/br_stp_bpdu.c1
-rw-r--r--net/bridge/br_stp_if.c1
-rw-r--r--net/bridge/br_sysfs_br.c287
-rw-r--r--net/bridge/br_sysfs_if.c20
-rw-r--r--net/bridge/netfilter/ebt_802_3.c2
-rw-r--r--net/bridge/netfilter/ebt_arp.c2
-rw-r--r--net/bridge/netfilter/ebt_arpreply.c2
-rw-r--r--net/bridge/netfilter/ebt_dnat.c2
-rw-r--r--net/bridge/netfilter/ebt_ip.c2
-rw-r--r--net/bridge/netfilter/ebt_ip6.c2
-rw-r--r--net/bridge/netfilter/ebt_limit.c18
-rw-r--r--net/bridge/netfilter/ebt_log.c2
-rw-r--r--net/bridge/netfilter/ebt_mark.c33
-rw-r--r--net/bridge/netfilter/ebt_mark_m.c39
-rw-r--r--net/bridge/netfilter/ebt_nflog.c2
-rw-r--r--net/bridge/netfilter/ebt_pkttype.c2
-rw-r--r--net/bridge/netfilter/ebt_redirect.c2
-rw-r--r--net/bridge/netfilter/ebt_snat.c2
-rw-r--r--net/bridge/netfilter/ebt_stp.c6
-rw-r--r--net/bridge/netfilter/ebt_ulog.c3
-rw-r--r--net/bridge/netfilter/ebt_vlan.c2
-rw-r--r--net/bridge/netfilter/ebtable_broute.c2
-rw-r--r--net/bridge/netfilter/ebtable_filter.c2
-rw-r--r--net/bridge/netfilter/ebtable_nat.c2
-rw-r--r--net/bridge/netfilter/ebtables.c1248
38 files changed, 3234 insertions, 247 deletions
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index e143ca678881..d115d5cea5b6 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -31,3 +31,17 @@ config BRIDGE
31 will be called bridge. 31 will be called bridge.
32 32
33 If unsure, say N. 33 If unsure, say N.
34
35config BRIDGE_IGMP_SNOOPING
36 bool "IGMP snooping"
37 depends on BRIDGE
38 depends on INET
39 default y
40 ---help---
41 If you say Y here, then the Ethernet bridge will be able selectively
42 forward multicast traffic based on IGMP traffic received from each
43 port.
44
45 Say N to exclude this support and reduce the binary size.
46
47 If unsure, say Y.
diff --git a/net/bridge/Makefile b/net/bridge/Makefile
index f444c12cde5a..d0359ea8ee79 100644
--- a/net/bridge/Makefile
+++ b/net/bridge/Makefile
@@ -12,4 +12,6 @@ bridge-$(CONFIG_SYSFS) += br_sysfs_if.o br_sysfs_br.o
12 12
13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o 13bridge-$(CONFIG_BRIDGE_NETFILTER) += br_netfilter.o
14 14
15bridge-$(CONFIG_BRIDGE_IGMP_SNOOPING) += br_multicast.o
16
15obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/ 17obj-$(CONFIG_BRIDGE_NF_EBTABLES) += netfilter/
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 07a07770c8b6..90a9024e5c1e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -25,6 +25,9 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
25 struct net_bridge *br = netdev_priv(dev); 25 struct net_bridge *br = netdev_priv(dev);
26 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
27 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
28 struct net_bridge_mdb_entry *mdst;
29
30 BR_INPUT_SKB_CB(skb)->brdev = dev;
28 31
29 dev->stats.tx_packets++; 32 dev->stats.tx_packets++;
30 dev->stats.tx_bytes += skb->len; 33 dev->stats.tx_bytes += skb->len;
@@ -32,13 +35,21 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
32 skb_reset_mac_header(skb); 35 skb_reset_mac_header(skb);
33 skb_pull(skb, ETH_HLEN); 36 skb_pull(skb, ETH_HLEN);
34 37
35 if (dest[0] & 1) 38 if (dest[0] & 1) {
36 br_flood_deliver(br, skb); 39 if (br_multicast_rcv(br, NULL, skb))
37 else if ((dst = __br_fdb_get(br, dest)) != NULL) 40 goto out;
41
42 mdst = br_mdb_get(br, skb);
43 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
44 br_multicast_deliver(mdst, skb);
45 else
46 br_flood_deliver(br, skb);
47 } else if ((dst = __br_fdb_get(br, dest)) != NULL)
38 br_deliver(dst->dst, skb); 48 br_deliver(dst->dst, skb);
39 else 49 else
40 br_flood_deliver(br, skb); 50 br_flood_deliver(br, skb);
41 51
52out:
42 return NETDEV_TX_OK; 53 return NETDEV_TX_OK;
43} 54}
44 55
@@ -49,6 +60,7 @@ static int br_dev_open(struct net_device *dev)
49 br_features_recompute(br); 60 br_features_recompute(br);
50 netif_start_queue(dev); 61 netif_start_queue(dev);
51 br_stp_enable_bridge(br); 62 br_stp_enable_bridge(br);
63 br_multicast_open(br);
52 64
53 return 0; 65 return 0;
54} 66}
@@ -59,7 +71,10 @@ static void br_dev_set_multicast_list(struct net_device *dev)
59 71
60static int br_dev_stop(struct net_device *dev) 72static int br_dev_stop(struct net_device *dev)
61{ 73{
62 br_stp_disable_bridge(netdev_priv(dev)); 74 struct net_bridge *br = netdev_priv(dev);
75
76 br_stp_disable_bridge(br);
77 br_multicast_stop(br);
63 78
64 netif_stop_queue(dev); 79 netif_stop_queue(dev);
65 80
@@ -157,6 +172,7 @@ static const struct ethtool_ops br_ethtool_ops = {
157 .get_tso = ethtool_op_get_tso, 172 .get_tso = ethtool_op_get_tso,
158 .set_tso = br_set_tso, 173 .set_tso = br_set_tso,
159 .get_ufo = ethtool_op_get_ufo, 174 .get_ufo = ethtool_op_get_ufo,
175 .set_ufo = ethtool_op_set_ufo,
160 .get_flags = ethtool_op_get_flags, 176 .get_flags = ethtool_op_get_flags,
161}; 177};
162 178
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 57bf05c353bc..9101a4e56201 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -20,6 +20,7 @@
20#include <linux/etherdevice.h> 20#include <linux/etherdevice.h>
21#include <linux/jhash.h> 21#include <linux/jhash.h>
22#include <linux/random.h> 22#include <linux/random.h>
23#include <linux/slab.h>
23#include <asm/atomic.h> 24#include <asm/atomic.h>
24#include <asm/unaligned.h> 25#include <asm/unaligned.h>
25#include "br_private.h" 26#include "br_private.h"
@@ -60,8 +61,8 @@ static inline unsigned long hold_time(const struct net_bridge *br)
60static inline int has_expired(const struct net_bridge *br, 61static inline int has_expired(const struct net_bridge *br,
61 const struct net_bridge_fdb_entry *fdb) 62 const struct net_bridge_fdb_entry *fdb)
62{ 63{
63 return !fdb->is_static 64 return !fdb->is_static &&
64 && time_before_eq(fdb->ageing_timer + hold_time(br), jiffies); 65 time_before_eq(fdb->ageing_timer + hold_time(br), jiffies);
65} 66}
66 67
67static inline int br_mac_hash(const unsigned char *mac) 68static inline int br_mac_hash(const unsigned char *mac)
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index bc1704ac6cd9..7a241c396981 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -11,6 +11,8 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/err.h>
15#include <linux/slab.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/netdevice.h> 17#include <linux/netdevice.h>
16#include <linux/skbuff.h> 18#include <linux/skbuff.h>
@@ -18,6 +20,11 @@
18#include <linux/netfilter_bridge.h> 20#include <linux/netfilter_bridge.h>
19#include "br_private.h" 21#include "br_private.h"
20 22
23static int deliver_clone(const struct net_bridge_port *prev,
24 struct sk_buff *skb,
25 void (*__packet_hook)(const struct net_bridge_port *p,
26 struct sk_buff *skb));
27
21/* Don't forward packets to originating port or forwarding diasabled */ 28/* Don't forward packets to originating port or forwarding diasabled */
22static inline int should_deliver(const struct net_bridge_port *p, 29static inline int should_deliver(const struct net_bridge_port *p,
23 const struct sk_buff *skb) 30 const struct sk_buff *skb)
@@ -93,61 +100,167 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
93} 100}
94 101
95/* called with rcu_read_lock */ 102/* called with rcu_read_lock */
96void br_forward(const struct net_bridge_port *to, struct sk_buff *skb) 103void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
97{ 104{
98 if (should_deliver(to, skb)) { 105 if (should_deliver(to, skb)) {
99 __br_forward(to, skb); 106 if (skb0)
107 deliver_clone(to, skb, __br_forward);
108 else
109 __br_forward(to, skb);
100 return; 110 return;
101 } 111 }
102 112
103 kfree_skb(skb); 113 if (!skb0)
114 kfree_skb(skb);
104} 115}
105 116
106/* called under bridge lock */ 117static int deliver_clone(const struct net_bridge_port *prev,
107static void br_flood(struct net_bridge *br, struct sk_buff *skb, 118 struct sk_buff *skb,
119 void (*__packet_hook)(const struct net_bridge_port *p,
120 struct sk_buff *skb))
121{
122 skb = skb_clone(skb, GFP_ATOMIC);
123 if (!skb) {
124 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
125
126 dev->stats.tx_dropped++;
127 return -ENOMEM;
128 }
129
130 __packet_hook(prev, skb);
131 return 0;
132}
133
134static struct net_bridge_port *maybe_deliver(
135 struct net_bridge_port *prev, struct net_bridge_port *p,
136 struct sk_buff *skb,
108 void (*__packet_hook)(const struct net_bridge_port *p, 137 void (*__packet_hook)(const struct net_bridge_port *p,
109 struct sk_buff *skb)) 138 struct sk_buff *skb))
110{ 139{
140 int err;
141
142 if (!should_deliver(p, skb))
143 return prev;
144
145 if (!prev)
146 goto out;
147
148 err = deliver_clone(prev, skb, __packet_hook);
149 if (err)
150 return ERR_PTR(err);
151
152out:
153 return p;
154}
155
156/* called under bridge lock */
157static void br_flood(struct net_bridge *br, struct sk_buff *skb,
158 struct sk_buff *skb0,
159 void (*__packet_hook)(const struct net_bridge_port *p,
160 struct sk_buff *skb))
161{
111 struct net_bridge_port *p; 162 struct net_bridge_port *p;
112 struct net_bridge_port *prev; 163 struct net_bridge_port *prev;
113 164
114 prev = NULL; 165 prev = NULL;
115 166
116 list_for_each_entry_rcu(p, &br->port_list, list) { 167 list_for_each_entry_rcu(p, &br->port_list, list) {
117 if (should_deliver(p, skb)) { 168 prev = maybe_deliver(prev, p, skb, __packet_hook);
118 if (prev != NULL) { 169 if (IS_ERR(prev))
119 struct sk_buff *skb2; 170 goto out;
120
121 if ((skb2 = skb_clone(skb, GFP_ATOMIC)) == NULL) {
122 br->dev->stats.tx_dropped++;
123 kfree_skb(skb);
124 return;
125 }
126
127 __packet_hook(prev, skb2);
128 }
129
130 prev = p;
131 }
132 } 171 }
133 172
134 if (prev != NULL) { 173 if (!prev)
174 goto out;
175
176 if (skb0)
177 deliver_clone(prev, skb, __packet_hook);
178 else
135 __packet_hook(prev, skb); 179 __packet_hook(prev, skb);
136 return; 180 return;
137 }
138 181
139 kfree_skb(skb); 182out:
183 if (!skb0)
184 kfree_skb(skb);
140} 185}
141 186
142 187
143/* called with rcu_read_lock */ 188/* called with rcu_read_lock */
144void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb) 189void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
145{ 190{
146 br_flood(br, skb, __br_deliver); 191 br_flood(br, skb, NULL, __br_deliver);
147} 192}
148 193
149/* called under bridge lock */ 194/* called under bridge lock */
150void br_flood_forward(struct net_bridge *br, struct sk_buff *skb) 195void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
196 struct sk_buff *skb2)
197{
198 br_flood(br, skb, skb2, __br_forward);
199}
200
201#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
202/* called with rcu_read_lock */
203static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
204 struct sk_buff *skb, struct sk_buff *skb0,
205 void (*__packet_hook)(
206 const struct net_bridge_port *p,
207 struct sk_buff *skb))
208{
209 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
210 struct net_bridge *br = netdev_priv(dev);
211 struct net_bridge_port *port;
212 struct net_bridge_port *lport, *rport;
213 struct net_bridge_port *prev;
214 struct net_bridge_port_group *p;
215 struct hlist_node *rp;
216
217 prev = NULL;
218
219 rp = br->router_list.first;
220 p = mdst ? mdst->ports : NULL;
221 while (p || rp) {
222 lport = p ? p->port : NULL;
223 rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
224 NULL;
225
226 port = (unsigned long)lport > (unsigned long)rport ?
227 lport : rport;
228
229 prev = maybe_deliver(prev, port, skb, __packet_hook);
230 if (IS_ERR(prev))
231 goto out;
232
233 if ((unsigned long)lport >= (unsigned long)port)
234 p = p->next;
235 if ((unsigned long)rport >= (unsigned long)port)
236 rp = rp->next;
237 }
238
239 if (!prev)
240 goto out;
241
242 if (skb0)
243 deliver_clone(prev, skb, __packet_hook);
244 else
245 __packet_hook(prev, skb);
246 return;
247
248out:
249 if (!skb0)
250 kfree_skb(skb);
251}
252
253/* called with rcu_read_lock */
254void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
255 struct sk_buff *skb)
256{
257 br_multicast_flood(mdst, skb, NULL, __br_deliver);
258}
259
260/* called with rcu_read_lock */
261void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
262 struct sk_buff *skb, struct sk_buff *skb2)
151{ 263{
152 br_flood(br, skb, __br_forward); 264 br_multicast_flood(mdst, skb, skb2, __br_forward);
153} 265}
266#endif
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 4a9f52732655..0b6b1f2ff7ac 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/rtnetlink.h> 20#include <linux/rtnetlink.h>
21#include <linux/if_ether.h> 21#include <linux/if_ether.h>
22#include <linux/slab.h>
22#include <net/sock.h> 23#include <net/sock.h>
23 24
24#include "br_private.h" 25#include "br_private.h"
@@ -147,6 +148,8 @@ static void del_nbp(struct net_bridge_port *p)
147 148
148 rcu_assign_pointer(dev->br_port, NULL); 149 rcu_assign_pointer(dev->br_port, NULL);
149 150
151 br_multicast_del_port(p);
152
150 kobject_uevent(&p->kobj, KOBJ_REMOVE); 153 kobject_uevent(&p->kobj, KOBJ_REMOVE);
151 kobject_del(&p->kobj); 154 kobject_del(&p->kobj);
152 155
@@ -154,7 +157,7 @@ static void del_nbp(struct net_bridge_port *p)
154} 157}
155 158
156/* called with RTNL */ 159/* called with RTNL */
157static void del_br(struct net_bridge *br) 160static void del_br(struct net_bridge *br, struct list_head *head)
158{ 161{
159 struct net_bridge_port *p, *n; 162 struct net_bridge_port *p, *n;
160 163
@@ -165,7 +168,7 @@ static void del_br(struct net_bridge *br)
165 del_timer_sync(&br->gc_timer); 168 del_timer_sync(&br->gc_timer);
166 169
167 br_sysfs_delbr(br->dev); 170 br_sysfs_delbr(br->dev);
168 unregister_netdevice(br->dev); 171 unregister_netdevice_queue(br->dev, head);
169} 172}
170 173
171static struct net_device *new_bridge_dev(struct net *net, const char *name) 174static struct net_device *new_bridge_dev(struct net *net, const char *name)
@@ -206,9 +209,8 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
206 209
207 br_netfilter_rtable_init(br); 210 br_netfilter_rtable_init(br);
208 211
209 INIT_LIST_HEAD(&br->age_list);
210
211 br_stp_timer_init(br); 212 br_stp_timer_init(br);
213 br_multicast_init(br);
212 214
213 return dev; 215 return dev;
214} 216}
@@ -260,6 +262,7 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
260 br_init_port(p); 262 br_init_port(p);
261 p->state = BR_STATE_DISABLED; 263 p->state = BR_STATE_DISABLED;
262 br_stp_port_timer_init(p); 264 br_stp_port_timer_init(p);
265 br_multicast_add_port(p);
263 266
264 return p; 267 return p;
265} 268}
@@ -323,7 +326,7 @@ int br_del_bridge(struct net *net, const char *name)
323 } 326 }
324 327
325 else 328 else
326 del_br(netdev_priv(dev)); 329 del_br(netdev_priv(dev), NULL);
327 330
328 rtnl_unlock(); 331 rtnl_unlock();
329 return ret; 332 return ret;
@@ -390,6 +393,10 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
390 if (dev->br_port != NULL) 393 if (dev->br_port != NULL)
391 return -EBUSY; 394 return -EBUSY;
392 395
396 /* No bridging devices that dislike that (e.g. wireless) */
397 if (dev->priv_flags & IFF_DONT_BRIDGE)
398 return -EOPNOTSUPP;
399
393 p = new_nbp(br, dev); 400 p = new_nbp(br, dev);
394 if (IS_ERR(p)) 401 if (IS_ERR(p))
395 return PTR_ERR(p); 402 return PTR_ERR(p);
@@ -463,18 +470,17 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
463 return 0; 470 return 0;
464} 471}
465 472
466void br_net_exit(struct net *net) 473void __net_exit br_net_exit(struct net *net)
467{ 474{
468 struct net_device *dev; 475 struct net_device *dev;
476 LIST_HEAD(list);
469 477
470 rtnl_lock(); 478 rtnl_lock();
471restart: 479 for_each_netdev(net, dev)
472 for_each_netdev(net, dev) { 480 if (dev->priv_flags & IFF_EBRIDGE)
473 if (dev->priv_flags & IFF_EBRIDGE) { 481 del_br(netdev_priv(dev), &list);
474 del_br(netdev_priv(dev)); 482
475 goto restart; 483 unregister_netdevice_many(&list);
476 }
477 }
478 rtnl_unlock(); 484 rtnl_unlock();
479 485
480} 486}
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 5ee1a3682bf2..a82dde2d2ead 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -11,6 +11,7 @@
11 * 2 of the License, or (at your option) any later version. 11 * 2 of the License, or (at your option) any later version.
12 */ 12 */
13 13
14#include <linux/slab.h>
14#include <linux/kernel.h> 15#include <linux/kernel.h>
15#include <linux/netdevice.h> 16#include <linux/netdevice.h>
16#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -20,9 +21,9 @@
20/* Bridge group multicast address 802.1d (pg 51). */ 21/* Bridge group multicast address 802.1d (pg 51). */
21const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; 22const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
22 23
23static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb) 24static int br_pass_frame_up(struct sk_buff *skb)
24{ 25{
25 struct net_device *indev, *brdev = br->dev; 26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
26 27
27 brdev->stats.rx_packets++; 28 brdev->stats.rx_packets++;
28 brdev->stats.rx_bytes += skb->len; 29 brdev->stats.rx_bytes += skb->len;
@@ -30,8 +31,8 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
30 indev = skb->dev; 31 indev = skb->dev;
31 skb->dev = brdev; 32 skb->dev = brdev;
32 33
33 NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL, 34 return NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
34 netif_receive_skb); 35 netif_receive_skb);
35} 36}
36 37
37/* note: already called with rcu_read_lock (preempt_disabled) */ 38/* note: already called with rcu_read_lock (preempt_disabled) */
@@ -41,6 +42,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
41 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port); 42 struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
42 struct net_bridge *br; 43 struct net_bridge *br;
43 struct net_bridge_fdb_entry *dst; 44 struct net_bridge_fdb_entry *dst;
45 struct net_bridge_mdb_entry *mdst;
44 struct sk_buff *skb2; 46 struct sk_buff *skb2;
45 47
46 if (!p || p->state == BR_STATE_DISABLED) 48 if (!p || p->state == BR_STATE_DISABLED)
@@ -50,9 +52,15 @@ int br_handle_frame_finish(struct sk_buff *skb)
50 br = p->br; 52 br = p->br;
51 br_fdb_update(br, p, eth_hdr(skb)->h_source); 53 br_fdb_update(br, p, eth_hdr(skb)->h_source);
52 54
55 if (is_multicast_ether_addr(dest) &&
56 br_multicast_rcv(br, p, skb))
57 goto drop;
58
53 if (p->state == BR_STATE_LEARNING) 59 if (p->state == BR_STATE_LEARNING)
54 goto drop; 60 goto drop;
55 61
62 BR_INPUT_SKB_CB(skb)->brdev = br->dev;
63
56 /* The packet skb2 goes to the local host (NULL to skip). */ 64 /* The packet skb2 goes to the local host (NULL to skip). */
57 skb2 = NULL; 65 skb2 = NULL;
58 66
@@ -62,27 +70,35 @@ int br_handle_frame_finish(struct sk_buff *skb)
62 dst = NULL; 70 dst = NULL;
63 71
64 if (is_multicast_ether_addr(dest)) { 72 if (is_multicast_ether_addr(dest)) {
73 mdst = br_mdb_get(br, skb);
74 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
75 if ((mdst && !hlist_unhashed(&mdst->mglist)) ||
76 br_multicast_is_router(br))
77 skb2 = skb;
78 br_multicast_forward(mdst, skb, skb2);
79 skb = NULL;
80 if (!skb2)
81 goto out;
82 } else
83 skb2 = skb;
84
65 br->dev->stats.multicast++; 85 br->dev->stats.multicast++;
66 skb2 = skb;
67 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) { 86 } else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
68 skb2 = skb; 87 skb2 = skb;
69 /* Do not forward the packet since it's local. */ 88 /* Do not forward the packet since it's local. */
70 skb = NULL; 89 skb = NULL;
71 } 90 }
72 91
73 if (skb2 == skb)
74 skb2 = skb_clone(skb, GFP_ATOMIC);
75
76 if (skb2)
77 br_pass_frame_up(br, skb2);
78
79 if (skb) { 92 if (skb) {
80 if (dst) 93 if (dst)
81 br_forward(dst->dst, skb); 94 br_forward(dst->dst, skb, skb2);
82 else 95 else
83 br_flood_forward(br, skb); 96 br_flood_forward(br, skb, skb2);
84 } 97 }
85 98
99 if (skb2)
100 return br_pass_frame_up(skb2);
101
86out: 102out:
87 return 0; 103 return 0;
88drop: 104drop:
diff --git a/net/bridge/br_ioctl.c b/net/bridge/br_ioctl.c
index 6a6433daaf27..995afc4b04dc 100644
--- a/net/bridge/br_ioctl.c
+++ b/net/bridge/br_ioctl.c
@@ -15,6 +15,7 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/if_bridge.h> 16#include <linux/if_bridge.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/slab.h>
18#include <linux/times.h> 19#include <linux/times.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <asm/uaccess.h> 21#include <asm/uaccess.h>
@@ -81,6 +82,7 @@ static int get_fdb_entries(struct net_bridge *br, void __user *userbuf,
81 return num; 82 return num;
82} 83}
83 84
85/* called with RTNL */
84static int add_del_if(struct net_bridge *br, int ifindex, int isadd) 86static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
85{ 87{
86 struct net_device *dev; 88 struct net_device *dev;
@@ -89,7 +91,7 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
89 if (!capable(CAP_NET_ADMIN)) 91 if (!capable(CAP_NET_ADMIN))
90 return -EPERM; 92 return -EPERM;
91 93
92 dev = dev_get_by_index(dev_net(br->dev), ifindex); 94 dev = __dev_get_by_index(dev_net(br->dev), ifindex);
93 if (dev == NULL) 95 if (dev == NULL)
94 return -EINVAL; 96 return -EINVAL;
95 97
@@ -98,7 +100,6 @@ static int add_del_if(struct net_bridge *br, int ifindex, int isadd)
98 else 100 else
99 ret = br_del_if(br, dev); 101 ret = br_del_if(br, dev);
100 102
101 dev_put(dev);
102 return ret; 103 return ret;
103} 104}
104 105
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
new file mode 100644
index 000000000000..eaa0e1bae49b
--- /dev/null
+++ b/net/bridge/br_multicast.c
@@ -0,0 +1,1309 @@
1/*
2 * Bridge multicast support.
3 *
4 * Copyright (c) 2010 Herbert Xu <herbert@gondor.apana.org.au>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 *
11 */
12
13#include <linux/err.h>
14#include <linux/if_ether.h>
15#include <linux/igmp.h>
16#include <linux/jhash.h>
17#include <linux/kernel.h>
18#include <linux/log2.h>
19#include <linux/netdevice.h>
20#include <linux/netfilter_bridge.h>
21#include <linux/random.h>
22#include <linux/rculist.h>
23#include <linux/skbuff.h>
24#include <linux/slab.h>
25#include <linux/timer.h>
26#include <net/ip.h>
27
28#include "br_private.h"
29
30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
31{
32 return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1);
33}
34
35static struct net_bridge_mdb_entry *__br_mdb_ip_get(
36 struct net_bridge_mdb_htable *mdb, __be32 dst, int hash)
37{
38 struct net_bridge_mdb_entry *mp;
39 struct hlist_node *p;
40
41 hlist_for_each_entry_rcu(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
42 if (dst == mp->addr)
43 return mp;
44 }
45
46 return NULL;
47}
48
49static struct net_bridge_mdb_entry *br_mdb_ip_get(
50 struct net_bridge_mdb_htable *mdb, __be32 dst)
51{
52 if (!mdb)
53 return NULL;
54
55 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
56}
57
58struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
59 struct sk_buff *skb)
60{
61 if (br->multicast_disabled)
62 return NULL;
63
64 switch (skb->protocol) {
65 case htons(ETH_P_IP):
66 if (BR_INPUT_SKB_CB(skb)->igmp)
67 break;
68 return br_mdb_ip_get(br->mdb, ip_hdr(skb)->daddr);
69 }
70
71 return NULL;
72}
73
74static void br_mdb_free(struct rcu_head *head)
75{
76 struct net_bridge_mdb_htable *mdb =
77 container_of(head, struct net_bridge_mdb_htable, rcu);
78 struct net_bridge_mdb_htable *old = mdb->old;
79
80 mdb->old = NULL;
81 kfree(old->mhash);
82 kfree(old);
83}
84
85static int br_mdb_copy(struct net_bridge_mdb_htable *new,
86 struct net_bridge_mdb_htable *old,
87 int elasticity)
88{
89 struct net_bridge_mdb_entry *mp;
90 struct hlist_node *p;
91 int maxlen;
92 int len;
93 int i;
94
95 for (i = 0; i < old->max; i++)
96 hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
97 hlist_add_head(&mp->hlist[new->ver],
98 &new->mhash[br_ip_hash(new, mp->addr)]);
99
100 if (!elasticity)
101 return 0;
102
103 maxlen = 0;
104 for (i = 0; i < new->max; i++) {
105 len = 0;
106 hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
107 len++;
108 if (len > maxlen)
109 maxlen = len;
110 }
111
112 return maxlen > elasticity ? -EINVAL : 0;
113}
114
115static void br_multicast_free_pg(struct rcu_head *head)
116{
117 struct net_bridge_port_group *p =
118 container_of(head, struct net_bridge_port_group, rcu);
119
120 kfree(p);
121}
122
123static void br_multicast_free_group(struct rcu_head *head)
124{
125 struct net_bridge_mdb_entry *mp =
126 container_of(head, struct net_bridge_mdb_entry, rcu);
127
128 kfree(mp);
129}
130
131static void br_multicast_group_expired(unsigned long data)
132{
133 struct net_bridge_mdb_entry *mp = (void *)data;
134 struct net_bridge *br = mp->br;
135 struct net_bridge_mdb_htable *mdb;
136
137 spin_lock(&br->multicast_lock);
138 if (!netif_running(br->dev) || timer_pending(&mp->timer))
139 goto out;
140
141 if (!hlist_unhashed(&mp->mglist))
142 hlist_del_init(&mp->mglist);
143
144 if (mp->ports)
145 goto out;
146
147 mdb = br->mdb;
148 hlist_del_rcu(&mp->hlist[mdb->ver]);
149 mdb->size--;
150
151 del_timer(&mp->query_timer);
152 call_rcu_bh(&mp->rcu, br_multicast_free_group);
153
154out:
155 spin_unlock(&br->multicast_lock);
156}
157
158static void br_multicast_del_pg(struct net_bridge *br,
159 struct net_bridge_port_group *pg)
160{
161 struct net_bridge_mdb_htable *mdb = br->mdb;
162 struct net_bridge_mdb_entry *mp;
163 struct net_bridge_port_group *p;
164 struct net_bridge_port_group **pp;
165
166 mp = br_mdb_ip_get(mdb, pg->addr);
167 if (WARN_ON(!mp))
168 return;
169
170 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
171 if (p != pg)
172 continue;
173
174 *pp = p->next;
175 hlist_del_init(&p->mglist);
176 del_timer(&p->timer);
177 del_timer(&p->query_timer);
178 call_rcu_bh(&p->rcu, br_multicast_free_pg);
179
180 if (!mp->ports && hlist_unhashed(&mp->mglist) &&
181 netif_running(br->dev))
182 mod_timer(&mp->timer, jiffies);
183
184 return;
185 }
186
187 WARN_ON(1);
188}
189
190static void br_multicast_port_group_expired(unsigned long data)
191{
192 struct net_bridge_port_group *pg = (void *)data;
193 struct net_bridge *br = pg->port->br;
194
195 spin_lock(&br->multicast_lock);
196 if (!netif_running(br->dev) || timer_pending(&pg->timer) ||
197 hlist_unhashed(&pg->mglist))
198 goto out;
199
200 br_multicast_del_pg(br, pg);
201
202out:
203 spin_unlock(&br->multicast_lock);
204}
205
206static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
207 int elasticity)
208{
209 struct net_bridge_mdb_htable *old = *mdbp;
210 struct net_bridge_mdb_htable *mdb;
211 int err;
212
213 mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
214 if (!mdb)
215 return -ENOMEM;
216
217 mdb->max = max;
218 mdb->old = old;
219
220 mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
221 if (!mdb->mhash) {
222 kfree(mdb);
223 return -ENOMEM;
224 }
225
226 mdb->size = old ? old->size : 0;
227 mdb->ver = old ? old->ver ^ 1 : 0;
228
229 if (!old || elasticity)
230 get_random_bytes(&mdb->secret, sizeof(mdb->secret));
231 else
232 mdb->secret = old->secret;
233
234 if (!old)
235 goto out;
236
237 err = br_mdb_copy(mdb, old, elasticity);
238 if (err) {
239 kfree(mdb->mhash);
240 kfree(mdb);
241 return err;
242 }
243
244 call_rcu_bh(&mdb->rcu, br_mdb_free);
245
246out:
247 rcu_assign_pointer(*mdbp, mdb);
248
249 return 0;
250}
251
252static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
253 __be32 group)
254{
255 struct sk_buff *skb;
256 struct igmphdr *ih;
257 struct ethhdr *eth;
258 struct iphdr *iph;
259
260 skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) +
261 sizeof(*ih) + 4);
262 if (!skb)
263 goto out;
264
265 skb->protocol = htons(ETH_P_IP);
266
267 skb_reset_mac_header(skb);
268 eth = eth_hdr(skb);
269
270 memcpy(eth->h_source, br->dev->dev_addr, 6);
271 eth->h_dest[0] = 1;
272 eth->h_dest[1] = 0;
273 eth->h_dest[2] = 0x5e;
274 eth->h_dest[3] = 0;
275 eth->h_dest[4] = 0;
276 eth->h_dest[5] = 1;
277 eth->h_proto = htons(ETH_P_IP);
278 skb_put(skb, sizeof(*eth));
279
280 skb_set_network_header(skb, skb->len);
281 iph = ip_hdr(skb);
282
283 iph->version = 4;
284 iph->ihl = 6;
285 iph->tos = 0xc0;
286 iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4);
287 iph->id = 0;
288 iph->frag_off = htons(IP_DF);
289 iph->ttl = 1;
290 iph->protocol = IPPROTO_IGMP;
291 iph->saddr = 0;
292 iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
293 ((u8 *)&iph[1])[0] = IPOPT_RA;
294 ((u8 *)&iph[1])[1] = 4;
295 ((u8 *)&iph[1])[2] = 0;
296 ((u8 *)&iph[1])[3] = 0;
297 ip_send_check(iph);
298 skb_put(skb, 24);
299
300 skb_set_transport_header(skb, skb->len);
301 ih = igmp_hdr(skb);
302 ih->type = IGMP_HOST_MEMBERSHIP_QUERY;
303 ih->code = (group ? br->multicast_last_member_interval :
304 br->multicast_query_response_interval) /
305 (HZ / IGMP_TIMER_SCALE);
306 ih->group = group;
307 ih->csum = 0;
308 ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
309 skb_put(skb, sizeof(*ih));
310
311 __skb_pull(skb, sizeof(*eth));
312
313out:
314 return skb;
315}
316
317static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
318{
319 struct net_bridge *br = mp->br;
320 struct sk_buff *skb;
321
322 skb = br_multicast_alloc_query(br, mp->addr);
323 if (!skb)
324 goto timer;
325
326 netif_rx(skb);
327
328timer:
329 if (++mp->queries_sent < br->multicast_last_member_count)
330 mod_timer(&mp->query_timer,
331 jiffies + br->multicast_last_member_interval);
332}
333
334static void br_multicast_group_query_expired(unsigned long data)
335{
336 struct net_bridge_mdb_entry *mp = (void *)data;
337 struct net_bridge *br = mp->br;
338
339 spin_lock(&br->multicast_lock);
340 if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) ||
341 mp->queries_sent >= br->multicast_last_member_count)
342 goto out;
343
344 br_multicast_send_group_query(mp);
345
346out:
347 spin_unlock(&br->multicast_lock);
348}
349
350static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
351{
352 struct net_bridge_port *port = pg->port;
353 struct net_bridge *br = port->br;
354 struct sk_buff *skb;
355
356 skb = br_multicast_alloc_query(br, pg->addr);
357 if (!skb)
358 goto timer;
359
360 br_deliver(port, skb);
361
362timer:
363 if (++pg->queries_sent < br->multicast_last_member_count)
364 mod_timer(&pg->query_timer,
365 jiffies + br->multicast_last_member_interval);
366}
367
368static void br_multicast_port_group_query_expired(unsigned long data)
369{
370 struct net_bridge_port_group *pg = (void *)data;
371 struct net_bridge_port *port = pg->port;
372 struct net_bridge *br = port->br;
373
374 spin_lock(&br->multicast_lock);
375 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
376 pg->queries_sent >= br->multicast_last_member_count)
377 goto out;
378
379 br_multicast_send_port_group_query(pg);
380
381out:
382 spin_unlock(&br->multicast_lock);
383}
384
/*
 * Look up @group in hash chain @hash of the mdb, growing or rehashing
 * the table along the way when the chain is too long or the table too
 * full.  Returns the existing entry, NULL when the caller should
 * allocate a new one, or an ERR_PTR():
 *   -EAGAIN  table was rehashed; caller must recompute the hash
 *   -E2BIG   table would exceed hash_max; snooping has been disabled
 *   -EEXIST  a previous rehash is still pending RCU destruction
 * Caller holds br->multicast_lock.
 */
static struct net_bridge_mdb_entry *br_multicast_get_group(
	struct net_bridge *br, struct net_bridge_port *port, __be32 group,
	int hash)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p;
	unsigned count = 0;
	unsigned max;
	int elasticity;
	int err;

	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
		count++;
		if (unlikely(group == mp->addr)) {
			return mp;
		}
	}

	elasticity = 0;
	max = mdb->max;

	/* An over-long chain may mean the addresses are being chosen to
	 * collide; schedule a rehash (with a fresh hash secret). */
	if (unlikely(count > br->hash_elasticity && count)) {
		if (net_ratelimit())
			printk(KERN_INFO "%s: Multicast hash table "
			       "chain limit reached: %s\n",
			       br->dev->name, port ? port->dev->name :
			       br->dev->name);

		elasticity = br->hash_elasticity;
	}

	/* Table full: double it, or give up entirely at hash_max. */
	if (mdb->size >= max) {
		max *= 2;
		if (unlikely(max >= br->hash_max)) {
			printk(KERN_WARNING "%s: Multicast hash table maximum "
			       "reached, disabling snooping: %s, %d\n",
			       br->dev->name, port ? port->dev->name :
			       br->dev->name,
			       max);
			err = -E2BIG;
disable:
			br->multicast_disabled = 1;
			goto err;
		}
	}

	if (max > mdb->max || elasticity) {
		/* Only one rehash may be in flight: the old table is
		 * freed via RCU and must be gone before the next one. */
		if (mdb->old) {
			if (net_ratelimit())
				printk(KERN_INFO "%s: Multicast hash table "
				       "on fire: %s\n",
				       br->dev->name, port ? port->dev->name :
				       br->dev->name);
			err = -EEXIST;
			goto err;
		}

		err = br_mdb_rehash(&br->mdb, max, elasticity);
		if (err) {
			printk(KERN_WARNING "%s: Cannot rehash multicast "
			       "hash table, disabling snooping: "
			       "%s, %d, %d\n",
			       br->dev->name, port ? port->dev->name :
			       br->dev->name,
			       mdb->size, err);
			goto disable;
		}

		/* Rehash moved every entry; caller must retry the lookup. */
		err = -EAGAIN;
		goto err;
	}

	return NULL;

err:
	mp = ERR_PTR(err);
	return mp;
}
464
/*
 * Find or create the mdb entry for @group, building the hash table on
 * first use and retrying the lookup after a rehash.  Returns the entry,
 * NULL on allocation failure, or an ERR_PTR() propagated from
 * br_multicast_get_group().  Caller holds br->multicast_lock.
 */
static struct net_bridge_mdb_entry *br_multicast_new_group(
	struct net_bridge *br, struct net_bridge_port *port, __be32 group)
{
	struct net_bridge_mdb_htable *mdb = br->mdb;
	struct net_bridge_mdb_entry *mp;
	int hash;

	if (!mdb) {
		if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
			return NULL;
		goto rehash;
	}

	hash = br_ip_hash(mdb, group);
	mp = br_multicast_get_group(br, port, group, hash);
	switch (PTR_ERR(mp)) {
	case 0:
		/* mp == NULL: no existing entry; allocate one below. */
		break;

	case -EAGAIN:
		/* Table was rehashed; pick up the new table and hash. */
rehash:
		mdb = br->mdb;
		hash = br_ip_hash(mdb, group);
		break;

	default:
		/* Either a live entry (whose address happens to be the
		 * switch value) or a genuine ERR_PTR(); return it as-is. */
		goto out;
	}

	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
	if (unlikely(!mp))
		goto out;	/* returns NULL on OOM */

	mp->br = br;
	mp->addr = group;
	setup_timer(&mp->timer, br_multicast_group_expired,
		    (unsigned long)mp);
	setup_timer(&mp->query_timer, br_multicast_group_query_expired,
		    (unsigned long)mp);

	/* Publish under RCU; readers walk mhash[] locklessly. */
	hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
	mdb->size++;

out:
	return mp;
}
511
512static int br_multicast_add_group(struct net_bridge *br,
513 struct net_bridge_port *port, __be32 group)
514{
515 struct net_bridge_mdb_entry *mp;
516 struct net_bridge_port_group *p;
517 struct net_bridge_port_group **pp;
518 unsigned long now = jiffies;
519 int err;
520
521 if (ipv4_is_local_multicast(group))
522 return 0;
523
524 spin_lock(&br->multicast_lock);
525 if (!netif_running(br->dev) ||
526 (port && port->state == BR_STATE_DISABLED))
527 goto out;
528
529 mp = br_multicast_new_group(br, port, group);
530 err = PTR_ERR(mp);
531 if (unlikely(IS_ERR(mp) || !mp))
532 goto err;
533
534 if (!port) {
535 hlist_add_head(&mp->mglist, &br->mglist);
536 mod_timer(&mp->timer, now + br->multicast_membership_interval);
537 goto out;
538 }
539
540 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
541 if (p->port == port)
542 goto found;
543 if ((unsigned long)p->port < (unsigned long)port)
544 break;
545 }
546
547 p = kzalloc(sizeof(*p), GFP_ATOMIC);
548 err = -ENOMEM;
549 if (unlikely(!p))
550 goto err;
551
552 p->addr = group;
553 p->port = port;
554 p->next = *pp;
555 hlist_add_head(&p->mglist, &port->mglist);
556 setup_timer(&p->timer, br_multicast_port_group_expired,
557 (unsigned long)p);
558 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
559 (unsigned long)p);
560
561 rcu_assign_pointer(*pp, p);
562
563found:
564 mod_timer(&p->timer, now + br->multicast_membership_interval);
565out:
566 err = 0;
567
568err:
569 spin_unlock(&br->multicast_lock);
570 return err;
571}
572
573static void br_multicast_router_expired(unsigned long data)
574{
575 struct net_bridge_port *port = (void *)data;
576 struct net_bridge *br = port->br;
577
578 spin_lock(&br->multicast_lock);
579 if (port->multicast_router != 1 ||
580 timer_pending(&port->multicast_router_timer) ||
581 hlist_unhashed(&port->rlist))
582 goto out;
583
584 hlist_del_init_rcu(&port->rlist);
585
586out:
587 spin_unlock(&br->multicast_lock);
588}
589
/*
 * Expiry handler for the bridge's own multicast_router_timer and
 * multicast_querier_timer (see br_multicast_init()).  Intentionally
 * empty: the state they guard is read via timer_pending(), so simply
 * letting the timer lapse clears it.
 */
static void br_multicast_local_router_expired(unsigned long data)
{
}
593
/*
 * Send a general IGMP query, either out of a single @port or up the
 * bridge device itself when @port is NULL.  Suppressed entirely while
 * another querier is active on the segment (querier timer pending).
 * The next query is scheduled even when allocation fails; the first
 * multicast_startup_query_count queries use the shorter startup
 * interval (@sent is the number sent so far).
 */
static void br_multicast_send_query(struct net_bridge *br,
	struct net_bridge_port *port, u32 sent)
{
	unsigned long time;
	struct sk_buff *skb;

	if (!netif_running(br->dev) || br->multicast_disabled ||
	    timer_pending(&br->multicast_querier_timer))
		return;

	skb = br_multicast_alloc_query(br, 0);
	if (!skb)
		goto timer;	/* OOM: skip this round, keep the cadence */

	if (port) {
		/* Restore the Ethernet header and run the netfilter
		 * LOCAL_OUT hook before transmitting on the port. */
		__skb_push(skb, sizeof(struct ethhdr));
		skb->dev = port->dev;
		NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
			dev_queue_xmit);
	} else
		netif_rx(skb);

timer:
	time = jiffies;
	time += sent < br->multicast_startup_query_count ?
		br->multicast_startup_query_interval :
		br->multicast_query_interval;
	mod_timer(port ? &port->multicast_query_timer :
			 &br->multicast_query_timer, time);
}
624
625static void br_multicast_port_query_expired(unsigned long data)
626{
627 struct net_bridge_port *port = (void *)data;
628 struct net_bridge *br = port->br;
629
630 spin_lock(&br->multicast_lock);
631 if (port->state == BR_STATE_DISABLED ||
632 port->state == BR_STATE_BLOCKING)
633 goto out;
634
635 if (port->multicast_startup_queries_sent <
636 br->multicast_startup_query_count)
637 port->multicast_startup_queries_sent++;
638
639 br_multicast_send_query(port->br, port,
640 port->multicast_startup_queries_sent);
641
642out:
643 spin_unlock(&br->multicast_lock);
644}
645
646void br_multicast_add_port(struct net_bridge_port *port)
647{
648 port->multicast_router = 1;
649
650 setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
651 (unsigned long)port);
652 setup_timer(&port->multicast_query_timer,
653 br_multicast_port_query_expired, (unsigned long)port);
654}
655
/*
 * Release per-port multicast state when @port leaves the bridge.
 * Only the router timer is deleted synchronously here; the query timer
 * is stopped in br_multicast_disable_port().  NOTE(review): that uses
 * plain del_timer() — confirm the query timer cannot still be running
 * when the port structure is freed.
 */
void br_multicast_del_port(struct net_bridge_port *port)
{
	del_timer_sync(&port->multicast_router_timer);
}
660
/*
 * Restart the startup-query cycle on @port: reset the counter and make
 * the query timer fire immediately.  The try_to_del_timer_sync()/
 * del_timer() pair only re-arms the timer when its handler is not
 * concurrently running on another CPU, avoiding a race with an
 * in-flight expiry.
 */
static void __br_multicast_enable_port(struct net_bridge_port *port)
{
	port->multicast_startup_queries_sent = 0;

	if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 ||
	    del_timer(&port->multicast_query_timer))
		mod_timer(&port->multicast_query_timer, jiffies);
}
669
670void br_multicast_enable_port(struct net_bridge_port *port)
671{
672 struct net_bridge *br = port->br;
673
674 spin_lock(&br->multicast_lock);
675 if (br->multicast_disabled || !netif_running(br->dev))
676 goto out;
677
678 __br_multicast_enable_port(port);
679
680out:
681 spin_unlock(&br->multicast_lock);
682}
683
/*
 * Stop snooping on @port (called from br_stp_disable_port() when the
 * port goes down): flush every port group it joined, drop it from the
 * router list and stop its timers.  All mutations happen under
 * multicast_lock; list removals use the RCU-safe variants so lockless
 * readers stay consistent.
 */
void br_multicast_disable_port(struct net_bridge_port *port)
{
	struct net_bridge *br = port->br;
	struct net_bridge_port_group *pg;
	struct hlist_node *p, *n;

	spin_lock(&br->multicast_lock);
	hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
		br_multicast_del_pg(br, pg);

	if (!hlist_unhashed(&port->rlist))
		hlist_del_init_rcu(&port->rlist);
	del_timer(&port->multicast_router_timer);
	del_timer(&port->multicast_query_timer);
	spin_unlock(&br->multicast_lock);
}
700
/*
 * Parse an IGMPv3 membership report and treat each group record as a
 * plain IGMPv2 join (source lists are ignored for now).  Returns 0 on
 * success or -EINVAL on a truncated packet.
 */
static int br_multicast_igmp3_report(struct net_bridge *br,
				     struct net_bridge_port *port,
				     struct sk_buff *skb)
{
	struct igmpv3_report *ih;
	struct igmpv3_grec *grec;
	int i;
	int len;
	int num;
	int type;
	int err = 0;
	__be32 group;

	if (!pskb_may_pull(skb, sizeof(*ih)))
		return -EINVAL;

	ih = igmpv3_report_hdr(skb);
	num = ntohs(ih->ngrec);
	len = sizeof(*ih);

	for (i = 0; i < num; i++) {
		len += sizeof(*grec);
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* Recompute the record pointer after each pull:
		 * pskb_may_pull() may reallocate and move skb->data. */
		grec = (void *)(skb->data + len - sizeof(*grec));
		group = grec->grec_mca;
		type = grec->grec_type;

		/* Skip the source addresses (4 bytes each); grec is not
		 * dereferenced again after this pull.  NOTE(review):
		 * grec_auxwords is not accounted for here — RFC 3376
		 * says reports must not carry aux data, but confirm
		 * records with nonzero aux data cannot desync parsing. */
		len += ntohs(grec->grec_nsrcs) * 4;
		if (!pskb_may_pull(skb, len))
			return -EINVAL;

		/* We treat this as an IGMPv2 report for now. */
		switch (type) {
		case IGMPV3_MODE_IS_INCLUDE:
		case IGMPV3_MODE_IS_EXCLUDE:
		case IGMPV3_CHANGE_TO_INCLUDE:
		case IGMPV3_CHANGE_TO_EXCLUDE:
		case IGMPV3_ALLOW_NEW_SOURCES:
		case IGMPV3_BLOCK_OLD_SOURCES:
			break;

		default:
			continue;
		}

		err = br_multicast_add_group(br, port, group);
		if (err)
			break;
	}

	return err;
}
755
/*
 * Insert @port into br->router_list, kept sorted by descending port
 * pointer (the same ordering br_multicast_add_group() uses for its
 * port list).  This is an open-coded RCU hlist insert: the node's own
 * next/pprev are set up first, and only then is it published with
 * rcu_assign_pointer() so lockless readers never see a half-built
 * node.  Caller holds br->multicast_lock.
 */
static void br_multicast_add_router(struct net_bridge *br,
				    struct net_bridge_port *port)
{
	struct hlist_node *p;
	struct hlist_node **h;

	/* Walk to the first node whose port pointer is <= ours. */
	for (h = &br->router_list.first;
	     (p = *h) &&
	     (unsigned long)container_of(p, struct net_bridge_port, rlist) >
	     (unsigned long)port;
	     h = &p->next)
		;

	port->rlist.pprev = h;
	port->rlist.next = p;
	rcu_assign_pointer(*h, &port->rlist);
	if (p)
		p->pprev = &port->rlist.next;
}
775
776static void br_multicast_mark_router(struct net_bridge *br,
777 struct net_bridge_port *port)
778{
779 unsigned long now = jiffies;
780
781 if (!port) {
782 if (br->multicast_router == 1)
783 mod_timer(&br->multicast_router_timer,
784 now + br->multicast_querier_interval);
785 return;
786 }
787
788 if (port->multicast_router != 1)
789 return;
790
791 if (!hlist_unhashed(&port->rlist))
792 goto timer;
793
794 br_multicast_add_router(br, port);
795
796timer:
797 mod_timer(&port->multicast_router_timer,
798 now + br->multicast_querier_interval);
799}
800
801static void br_multicast_query_received(struct net_bridge *br,
802 struct net_bridge_port *port,
803 __be32 saddr)
804{
805 if (saddr)
806 mod_timer(&br->multicast_querier_timer,
807 jiffies + br->multicast_querier_interval);
808 else if (timer_pending(&br->multicast_querier_timer))
809 return;
810
811 br_multicast_mark_router(br, port);
812}
813
814static int br_multicast_query(struct net_bridge *br,
815 struct net_bridge_port *port,
816 struct sk_buff *skb)
817{
818 struct iphdr *iph = ip_hdr(skb);
819 struct igmphdr *ih = igmp_hdr(skb);
820 struct net_bridge_mdb_entry *mp;
821 struct igmpv3_query *ih3;
822 struct net_bridge_port_group *p;
823 struct net_bridge_port_group **pp;
824 unsigned long max_delay;
825 unsigned long now = jiffies;
826 __be32 group;
827 int err = 0;
828
829 spin_lock(&br->multicast_lock);
830 if (!netif_running(br->dev) ||
831 (port && port->state == BR_STATE_DISABLED))
832 goto out;
833
834 br_multicast_query_received(br, port, iph->saddr);
835
836 group = ih->group;
837
838 if (skb->len == sizeof(*ih)) {
839 max_delay = ih->code * (HZ / IGMP_TIMER_SCALE);
840
841 if (!max_delay) {
842 max_delay = 10 * HZ;
843 group = 0;
844 }
845 } else {
846 if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) {
847 err = -EINVAL;
848 goto out;
849 }
850
851 ih3 = igmpv3_query_hdr(skb);
852 if (ih3->nsrcs)
853 goto out;
854
855 max_delay = ih3->code ?
856 IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
857 }
858
859 if (!group)
860 goto out;
861
862 mp = br_mdb_ip_get(br->mdb, group);
863 if (!mp)
864 goto out;
865
866 max_delay *= br->multicast_last_member_count;
867
868 if (!hlist_unhashed(&mp->mglist) &&
869 (timer_pending(&mp->timer) ?
870 time_after(mp->timer.expires, now + max_delay) :
871 try_to_del_timer_sync(&mp->timer) >= 0))
872 mod_timer(&mp->timer, now + max_delay);
873
874 for (pp = &mp->ports; (p = *pp); pp = &p->next) {
875 if (timer_pending(&p->timer) ?
876 time_after(p->timer.expires, now + max_delay) :
877 try_to_del_timer_sync(&p->timer) >= 0)
878 mod_timer(&mp->timer, now + max_delay);
879 }
880
881out:
882 spin_unlock(&br->multicast_lock);
883 return err;
884}
885
/*
 * Handle an IGMP leave for @group from @port (or from the bridge
 * device itself when @port is NULL): shorten the membership timer to
 * the last-member window and start sending group-specific queries.
 * Ignored for link-local groups and while a foreign querier is active
 * (it will send its own group-specific queries).
 */
static void br_multicast_leave_group(struct net_bridge *br,
				     struct net_bridge_port *port,
				     __be32 group)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct net_bridge_port_group *p;
	unsigned long now;
	unsigned long time;

	if (ipv4_is_local_multicast(group))
		return;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev) ||
	    (port && port->state == BR_STATE_DISABLED) ||
	    timer_pending(&br->multicast_querier_timer))
		goto out;

	mdb = br->mdb;
	mp = br_mdb_ip_get(mdb, group);
	if (!mp)
		goto out;

	now = jiffies;
	time = now + br->multicast_last_member_count *
		     br->multicast_last_member_interval;

	if (!port) {
		/* Only shorten the timer, never extend it, and skip it
		 * if its handler is running on another CPU. */
		if (!hlist_unhashed(&mp->mglist) &&
		    (timer_pending(&mp->timer) ?
		     time_after(mp->timer.expires, time) :
		     try_to_del_timer_sync(&mp->timer) >= 0)) {
			mod_timer(&mp->timer, time);

			mp->queries_sent = 0;
			mod_timer(&mp->query_timer, now);
		}

		goto out;
	}

	for (p = mp->ports; p; p = p->next) {
		if (p->port != port)
			continue;

		if (!hlist_unhashed(&p->mglist) &&
		    (timer_pending(&p->timer) ?
		     time_after(p->timer.expires, time) :
		     try_to_del_timer_sync(&p->timer) >= 0)) {
			mod_timer(&p->timer, time);

			p->queries_sent = 0;
			mod_timer(&p->query_timer, now);
		}

		break;
	}

out:
	spin_unlock(&br->multicast_lock);
}
948
/*
 * Validate and snoop an IPv4 frame.  Non-IGMP traffic passes through
 * with 0; IGMP packets are checksum-verified and dispatched to the
 * report/query/leave handlers.  When the frame carries trailing
 * padding beyond the IP total length, a trimmed clone is parsed so the
 * original skb keeps forwarding unchanged.
 */
static int br_multicast_ipv4_rcv(struct net_bridge *br,
				 struct net_bridge_port *port,
				 struct sk_buff *skb)
{
	struct sk_buff *skb2 = skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	unsigned len;
	unsigned offset;
	int err;

	/* We treat OOM as packet loss for now. */
	if (!pskb_may_pull(skb, sizeof(*iph)))
		return -EINVAL;

	iph = ip_hdr(skb);

	if (iph->ihl < 5 || iph->version != 4)
		return -EINVAL;

	if (!pskb_may_pull(skb, ip_hdrlen(skb)))
		return -EINVAL;

	/* Re-read: pskb_may_pull() may have moved the header. */
	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		return -EINVAL;

	if (iph->protocol != IPPROTO_IGMP)
		return 0;

	len = ntohs(iph->tot_len);
	if (skb->len < len || len < ip_hdrlen(skb))
		return -EINVAL;

	if (skb->len > len) {
		/* Trailing pad (e.g. Ethernet minimum-size padding):
		 * parse a clone trimmed to the IP total length. */
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			return -ENOMEM;

		err = pskb_trim_rcsum(skb2, len);
		if (err)
			goto err_out;
	}

	len -= ip_hdrlen(skb2);
	offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
	__skb_pull(skb2, offset);
	skb_reset_transport_header(skb2);

	err = -EINVAL;
	if (!pskb_may_pull(skb2, sizeof(*ih)))
		goto out;

	iph = ip_hdr(skb2);

	/* Verify the IGMP checksum over the transport payload. */
	switch (skb2->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb2->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb2->csum = 0;
		if (skb_checksum_complete(skb2))
			goto out;
	}

	err = 0;

	/* Mark the ORIGINAL skb: the bridge forwarding path reads this. */
	BR_INPUT_SKB_CB(skb)->igmp = 1;
	ih = igmp_hdr(skb2);

	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
		err = br_multicast_add_group(br, port, ih->group);
		break;
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
		err = br_multicast_igmp3_report(br, port, skb2);
		break;
	case IGMP_HOST_MEMBERSHIP_QUERY:
		err = br_multicast_query(br, port, skb2);
		break;
	case IGMP_HOST_LEAVE_MESSAGE:
		br_multicast_leave_group(br, port, ih->group);
		break;
	}

out:
	/* Undo the pull: when skb2 == skb the caller still forwards it. */
	__skb_push(skb2, offset);
err_out:
	if (skb2 != skb)
		kfree_skb(skb2);
	return err;
}
1045
1046int br_multicast_rcv(struct net_bridge *br, struct net_bridge_port *port,
1047 struct sk_buff *skb)
1048{
1049 BR_INPUT_SKB_CB(skb)->igmp = 0;
1050 BR_INPUT_SKB_CB(skb)->mrouters_only = 0;
1051
1052 if (br->multicast_disabled)
1053 return 0;
1054
1055 switch (skb->protocol) {
1056 case htons(ETH_P_IP):
1057 return br_multicast_ipv4_rcv(br, port, skb);
1058 }
1059
1060 return 0;
1061}
1062
1063static void br_multicast_query_expired(unsigned long data)
1064{
1065 struct net_bridge *br = (void *)data;
1066
1067 spin_lock(&br->multicast_lock);
1068 if (br->multicast_startup_queries_sent <
1069 br->multicast_startup_query_count)
1070 br->multicast_startup_queries_sent++;
1071
1072 br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);
1073
1074 spin_unlock(&br->multicast_lock);
1075}
1076
1077void br_multicast_init(struct net_bridge *br)
1078{
1079 br->hash_elasticity = 4;
1080 br->hash_max = 512;
1081
1082 br->multicast_router = 1;
1083 br->multicast_last_member_count = 2;
1084 br->multicast_startup_query_count = 2;
1085
1086 br->multicast_last_member_interval = HZ;
1087 br->multicast_query_response_interval = 10 * HZ;
1088 br->multicast_startup_query_interval = 125 * HZ / 4;
1089 br->multicast_query_interval = 125 * HZ;
1090 br->multicast_querier_interval = 255 * HZ;
1091 br->multicast_membership_interval = 260 * HZ;
1092
1093 spin_lock_init(&br->multicast_lock);
1094 setup_timer(&br->multicast_router_timer,
1095 br_multicast_local_router_expired, 0);
1096 setup_timer(&br->multicast_querier_timer,
1097 br_multicast_local_router_expired, 0);
1098 setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
1099 (unsigned long)br);
1100}
1101
1102void br_multicast_open(struct net_bridge *br)
1103{
1104 br->multicast_startup_queries_sent = 0;
1105
1106 if (br->multicast_disabled)
1107 return;
1108
1109 mod_timer(&br->multicast_query_timer, jiffies);
1110}
1111
/*
 * Tear down all multicast state when the bridge device goes down:
 * stop the bridge timers, unlink every mdb entry and free both the
 * entries and the hash table via RCU.  If an old table from a pending
 * rehash still exists, drop the lock and wait for its RCU callback
 * before freeing the current one (only one table may be in flight).
 */
void br_multicast_stop(struct net_bridge *br)
{
	struct net_bridge_mdb_htable *mdb;
	struct net_bridge_mdb_entry *mp;
	struct hlist_node *p, *n;
	u32 ver;
	int i;

	del_timer_sync(&br->multicast_router_timer);
	del_timer_sync(&br->multicast_querier_timer);
	del_timer_sync(&br->multicast_query_timer);

	spin_lock_bh(&br->multicast_lock);
	mdb = br->mdb;
	if (!mdb)
		goto out;

	br->mdb = NULL;

	ver = mdb->ver;
	for (i = 0; i < mdb->max; i++) {
		hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
					  hlist[ver]) {
			del_timer(&mp->timer);
			del_timer(&mp->query_timer);
			call_rcu_bh(&mp->rcu, br_multicast_free_group);
		}
	}

	if (mdb->old) {
		spin_unlock_bh(&br->multicast_lock);
		rcu_barrier_bh();
		spin_lock_bh(&br->multicast_lock);
		WARN_ON(mdb->old);
	}

	/* Self-referencing 'old' keeps br_mdb_free()'s invariant. */
	mdb->old = mdb;
	call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
	spin_unlock_bh(&br->multicast_lock);
}
1154
1155int br_multicast_set_router(struct net_bridge *br, unsigned long val)
1156{
1157 int err = -ENOENT;
1158
1159 spin_lock_bh(&br->multicast_lock);
1160 if (!netif_running(br->dev))
1161 goto unlock;
1162
1163 switch (val) {
1164 case 0:
1165 case 2:
1166 del_timer(&br->multicast_router_timer);
1167 /* fall through */
1168 case 1:
1169 br->multicast_router = val;
1170 err = 0;
1171 break;
1172
1173 default:
1174 err = -EINVAL;
1175 break;
1176 }
1177
1178unlock:
1179 spin_unlock_bh(&br->multicast_lock);
1180
1181 return err;
1182}
1183
1184int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
1185{
1186 struct net_bridge *br = p->br;
1187 int err = -ENOENT;
1188
1189 spin_lock(&br->multicast_lock);
1190 if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
1191 goto unlock;
1192
1193 switch (val) {
1194 case 0:
1195 case 1:
1196 case 2:
1197 p->multicast_router = val;
1198 err = 0;
1199
1200 if (val < 2 && !hlist_unhashed(&p->rlist))
1201 hlist_del_init_rcu(&p->rlist);
1202
1203 if (val == 1)
1204 break;
1205
1206 del_timer(&p->multicast_router_timer);
1207
1208 if (val == 0)
1209 break;
1210
1211 br_multicast_add_router(br, p);
1212 break;
1213
1214 default:
1215 err = -EINVAL;
1216 break;
1217 }
1218
1219unlock:
1220 spin_unlock(&br->multicast_lock);
1221
1222 return err;
1223}
1224
/*
 * sysfs handler: enable (@val != 0) or disable snooping.  Re-enabling
 * rehashes the existing mdb (to pick up the current elasticity) and
 * restarts querying on every active port; failure rolls the disabled
 * flag back.  Returns 0, -ENOENT when the device is down, -EEXIST if
 * a previous rehash is still pending, or a rehash error.
 */
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
	struct net_bridge_port *port;
	int err = -ENOENT;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = 0;
	if (br->multicast_disabled == !val)
		goto unlock;	/* no change requested */

	br->multicast_disabled = !val;
	if (br->multicast_disabled)
		goto unlock;

	if (br->mdb) {
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->multicast_disabled = !!val;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->mdb->max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

	br_multicast_open(br);
	list_for_each_entry(port, &br->port_list, list) {
		if (port->state == BR_STATE_DISABLED ||
		    port->state == BR_STATE_BLOCKING)
			continue;

		__br_multicast_enable_port(port);
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
1270
/*
 * sysfs handler: set the maximum mdb hash table size.  @val must be a
 * power of two and no smaller than the current entry count; if a table
 * exists it is rehashed to the new maximum immediately, and failure
 * restores the previous limit.  Returns 0, -EINVAL for a bad value,
 * -ENOENT when the device is down, -EEXIST while a previous rehash is
 * still pending, or a rehash error.
 */
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
	int err = -ENOENT;
	u32 old;

	spin_lock(&br->multicast_lock);
	if (!netif_running(br->dev))
		goto unlock;

	err = -EINVAL;
	if (!is_power_of_2(val))
		goto unlock;
	if (br->mdb && val < br->mdb->size)
		goto unlock;

	err = 0;

	old = br->hash_max;
	br->hash_max = val;

	if (br->mdb) {
		if (br->mdb->old) {
			err = -EEXIST;
rollback:
			br->hash_max = old;
			goto unlock;
		}

		err = br_mdb_rehash(&br->mdb, br->hash_max,
				    br->hash_elasticity);
		if (err)
			goto rollback;
	}

unlock:
	spin_unlock(&br->multicast_lock);

	return err;
}
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index a16a2342f6bf..4c4977d12fd6 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -23,6 +23,7 @@
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/slab.h>
26#include <linux/ip.h> 27#include <linux/ip.h>
27#include <linux/netdevice.h> 28#include <linux/netdevice.h>
28#include <linux/skbuff.h> 29#include <linux/skbuff.h>
@@ -1013,12 +1014,12 @@ static ctl_table brnf_table[] = {
1013 .mode = 0644, 1014 .mode = 0644,
1014 .proc_handler = brnf_sysctl_call_tables, 1015 .proc_handler = brnf_sysctl_call_tables,
1015 }, 1016 },
1016 { .ctl_name = 0 } 1017 { }
1017}; 1018};
1018 1019
1019static struct ctl_path brnf_path[] = { 1020static struct ctl_path brnf_path[] = {
1020 { .procname = "net", .ctl_name = CTL_NET, }, 1021 { .procname = "net", },
1021 { .procname = "bridge", .ctl_name = NET_BRIDGE, }, 1022 { .procname = "bridge", },
1022 { } 1023 { }
1023}; 1024};
1024#endif 1025#endif
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index fcffb3fb1177..aa56ac2c8829 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/slab.h>
14#include <net/rtnetlink.h> 15#include <net/rtnetlink.h>
15#include <net/net_namespace.h> 16#include <net/net_namespace.h>
16#include <net/sock.h> 17#include <net/sock.h>
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 2114e45682ea..846d7d1e2075 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -57,6 +57,41 @@ struct net_bridge_fdb_entry
57 unsigned char is_static; 57 unsigned char is_static;
58}; 58};
59 59
60struct net_bridge_port_group {
61 struct net_bridge_port *port;
62 struct net_bridge_port_group *next;
63 struct hlist_node mglist;
64 struct rcu_head rcu;
65 struct timer_list timer;
66 struct timer_list query_timer;
67 __be32 addr;
68 u32 queries_sent;
69};
70
71struct net_bridge_mdb_entry
72{
73 struct hlist_node hlist[2];
74 struct hlist_node mglist;
75 struct net_bridge *br;
76 struct net_bridge_port_group *ports;
77 struct rcu_head rcu;
78 struct timer_list timer;
79 struct timer_list query_timer;
80 __be32 addr;
81 u32 queries_sent;
82};
83
84struct net_bridge_mdb_htable
85{
86 struct hlist_head *mhash;
87 struct rcu_head rcu;
88 struct net_bridge_mdb_htable *old;
89 u32 size;
90 u32 max;
91 u32 secret;
92 u32 ver;
93};
94
60struct net_bridge_port 95struct net_bridge_port
61{ 96{
62 struct net_bridge *br; 97 struct net_bridge *br;
@@ -84,6 +119,15 @@ struct net_bridge_port
84 119
85 unsigned long flags; 120 unsigned long flags;
86#define BR_HAIRPIN_MODE 0x00000001 121#define BR_HAIRPIN_MODE 0x00000001
122
123#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
124 u32 multicast_startup_queries_sent;
125 unsigned char multicast_router;
126 struct timer_list multicast_router_timer;
127 struct timer_list multicast_query_timer;
128 struct hlist_head mglist;
129 struct hlist_node rlist;
130#endif
87}; 131};
88 132
89struct net_bridge 133struct net_bridge
@@ -93,7 +137,6 @@ struct net_bridge
93 struct net_device *dev; 137 struct net_device *dev;
94 spinlock_t hash_lock; 138 spinlock_t hash_lock;
95 struct hlist_head hash[BR_HASH_SIZE]; 139 struct hlist_head hash[BR_HASH_SIZE];
96 struct list_head age_list;
97 unsigned long feature_mask; 140 unsigned long feature_mask;
98#ifdef CONFIG_BRIDGE_NETFILTER 141#ifdef CONFIG_BRIDGE_NETFILTER
99 struct rtable fake_rtable; 142 struct rtable fake_rtable;
@@ -125,6 +168,35 @@ struct net_bridge
125 unsigned char topology_change; 168 unsigned char topology_change;
126 unsigned char topology_change_detected; 169 unsigned char topology_change_detected;
127 170
171#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
172 unsigned char multicast_router;
173
174 u8 multicast_disabled:1;
175
176 u32 hash_elasticity;
177 u32 hash_max;
178
179 u32 multicast_last_member_count;
180 u32 multicast_startup_queries_sent;
181 u32 multicast_startup_query_count;
182
183 unsigned long multicast_last_member_interval;
184 unsigned long multicast_membership_interval;
185 unsigned long multicast_querier_interval;
186 unsigned long multicast_query_interval;
187 unsigned long multicast_query_response_interval;
188 unsigned long multicast_startup_query_interval;
189
190 spinlock_t multicast_lock;
191 struct net_bridge_mdb_htable *mdb;
192 struct hlist_head router_list;
193 struct hlist_head mglist;
194
195 struct timer_list multicast_router_timer;
196 struct timer_list multicast_querier_timer;
197 struct timer_list multicast_query_timer;
198#endif
199
128 struct timer_list hello_timer; 200 struct timer_list hello_timer;
129 struct timer_list tcn_timer; 201 struct timer_list tcn_timer;
130 struct timer_list topology_change_timer; 202 struct timer_list topology_change_timer;
@@ -132,6 +204,22 @@ struct net_bridge
132 struct kobject *ifobj; 204 struct kobject *ifobj;
133}; 205};
134 206
207struct br_input_skb_cb {
208 struct net_device *brdev;
209#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
210 int igmp;
211 int mrouters_only;
212#endif
213};
214
215#define BR_INPUT_SKB_CB(__skb) ((struct br_input_skb_cb *)(__skb)->cb)
216
217#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
218# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (BR_INPUT_SKB_CB(__skb)->mrouters_only)
219#else
220# define BR_INPUT_SKB_CB_MROUTERS_ONLY(__skb) (0)
221#endif
222
135extern struct notifier_block br_device_notifier; 223extern struct notifier_block br_device_notifier;
136extern const u8 br_group_address[ETH_ALEN]; 224extern const u8 br_group_address[ETH_ALEN];
137 225
@@ -172,10 +260,11 @@ extern void br_deliver(const struct net_bridge_port *to,
172 struct sk_buff *skb); 260 struct sk_buff *skb);
173extern int br_dev_queue_push_xmit(struct sk_buff *skb); 261extern int br_dev_queue_push_xmit(struct sk_buff *skb);
174extern void br_forward(const struct net_bridge_port *to, 262extern void br_forward(const struct net_bridge_port *to,
175 struct sk_buff *skb); 263 struct sk_buff *skb, struct sk_buff *skb0);
176extern int br_forward_finish(struct sk_buff *skb); 264extern int br_forward_finish(struct sk_buff *skb);
177extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb); 265extern void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb);
178extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb); 266extern void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
267 struct sk_buff *skb2);
179 268
180/* br_if.c */ 269/* br_if.c */
181extern void br_port_carrier_check(struct net_bridge_port *p); 270extern void br_port_carrier_check(struct net_bridge_port *p);
@@ -198,6 +287,94 @@ extern struct sk_buff *br_handle_frame(struct net_bridge_port *p,
198extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 287extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
199extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg); 288extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
200 289
290/* br_multicast.c */
291#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
292extern int br_multicast_rcv(struct net_bridge *br,
293 struct net_bridge_port *port,
294 struct sk_buff *skb);
295extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
296 struct sk_buff *skb);
297extern void br_multicast_add_port(struct net_bridge_port *port);
298extern void br_multicast_del_port(struct net_bridge_port *port);
299extern void br_multicast_enable_port(struct net_bridge_port *port);
300extern void br_multicast_disable_port(struct net_bridge_port *port);
301extern void br_multicast_init(struct net_bridge *br);
302extern void br_multicast_open(struct net_bridge *br);
303extern void br_multicast_stop(struct net_bridge *br);
304extern void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
305 struct sk_buff *skb);
306extern void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
307 struct sk_buff *skb, struct sk_buff *skb2);
308extern int br_multicast_set_router(struct net_bridge *br, unsigned long val);
309extern int br_multicast_set_port_router(struct net_bridge_port *p,
310 unsigned long val);
311extern int br_multicast_toggle(struct net_bridge *br, unsigned long val);
312extern int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val);
313
314static inline bool br_multicast_is_router(struct net_bridge *br)
315{
316 return br->multicast_router == 2 ||
317 (br->multicast_router == 1 &&
318 timer_pending(&br->multicast_router_timer));
319}
320#else
321static inline int br_multicast_rcv(struct net_bridge *br,
322 struct net_bridge_port *port,
323 struct sk_buff *skb)
324{
325 return 0;
326}
327
328static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
329 struct sk_buff *skb)
330{
331 return NULL;
332}
333
334static inline void br_multicast_add_port(struct net_bridge_port *port)
335{
336}
337
338static inline void br_multicast_del_port(struct net_bridge_port *port)
339{
340}
341
342static inline void br_multicast_enable_port(struct net_bridge_port *port)
343{
344}
345
346static inline void br_multicast_disable_port(struct net_bridge_port *port)
347{
348}
349
350static inline void br_multicast_init(struct net_bridge *br)
351{
352}
353
354static inline void br_multicast_open(struct net_bridge *br)
355{
356}
357
358static inline void br_multicast_stop(struct net_bridge *br)
359{
360}
361
362static inline void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
363 struct sk_buff *skb)
364{
365}
366
367static inline void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
368 struct sk_buff *skb,
369 struct sk_buff *skb2)
370{
371}
372static inline bool br_multicast_is_router(struct net_bridge *br)
373{
374 return 0;
375}
376#endif
377
201/* br_netfilter.c */ 378/* br_netfilter.c */
202#ifdef CONFIG_BRIDGE_NETFILTER 379#ifdef CONFIG_BRIDGE_NETFILTER
203extern int br_netfilter_init(void); 380extern int br_netfilter_init(void);
@@ -254,7 +431,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
254 431
255#ifdef CONFIG_SYSFS 432#ifdef CONFIG_SYSFS
256/* br_sysfs_if.c */ 433/* br_sysfs_if.c */
257extern struct sysfs_ops brport_sysfs_ops; 434extern const struct sysfs_ops brport_sysfs_ops;
258extern int br_sysfs_addif(struct net_bridge_port *p); 435extern int br_sysfs_addif(struct net_bridge_port *p);
259 436
260/* br_sysfs_br.c */ 437/* br_sysfs_br.c */
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index fd3f8d6c0998..edcf14b560f6 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -386,6 +386,8 @@ static void br_make_forwarding(struct net_bridge_port *p)
386 else 386 else
387 p->state = BR_STATE_LEARNING; 387 p->state = BR_STATE_LEARNING;
388 388
389 br_multicast_enable_port(p);
390
389 br_log_state(p); 391 br_log_state(p);
390 392
391 if (br->forward_delay != 0) 393 if (br->forward_delay != 0)
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 81ae40b3f655..d66cce11f3bf 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -15,6 +15,7 @@
15#include <linux/netfilter_bridge.h> 15#include <linux/netfilter_bridge.h>
16#include <linux/etherdevice.h> 16#include <linux/etherdevice.h>
17#include <linux/llc.h> 17#include <linux/llc.h>
18#include <linux/slab.h>
18#include <net/net_namespace.h> 19#include <net/net_namespace.h>
19#include <net/llc.h> 20#include <net/llc.h>
20#include <net/llc_pdu.h> 21#include <net/llc_pdu.h>
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 9a52ac5b4525..d527119e9f54 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -108,6 +108,7 @@ void br_stp_disable_port(struct net_bridge_port *p)
108 del_timer(&p->hold_timer); 108 del_timer(&p->hold_timer);
109 109
110 br_fdb_delete_by_port(br, p, 0); 110 br_fdb_delete_by_port(br, p, 0);
111 br_multicast_disable_port(p);
111 112
112 br_configuration_update(br); 113 br_configuration_update(br);
113 114
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c
index ee4820aa1843..dd321e39e621 100644
--- a/net/bridge/br_sysfs_br.c
+++ b/net/bridge/br_sysfs_br.c
@@ -316,9 +316,9 @@ static ssize_t store_group_addr(struct device *d,
316 if (new_addr[5] & ~0xf) 316 if (new_addr[5] & ~0xf)
317 return -EINVAL; 317 return -EINVAL;
318 318
319 if (new_addr[5] == 1 /* 802.3x Pause address */ 319 if (new_addr[5] == 1 || /* 802.3x Pause address */
320 || new_addr[5] == 2 /* 802.3ad Slow protocols */ 320 new_addr[5] == 2 || /* 802.3ad Slow protocols */
321 || new_addr[5] == 3) /* 802.1X PAE address */ 321 new_addr[5] == 3) /* 802.1X PAE address */
322 return -EINVAL; 322 return -EINVAL;
323 323
324 spin_lock_bh(&br->lock); 324 spin_lock_bh(&br->lock);
@@ -345,6 +345,273 @@ static ssize_t store_flush(struct device *d,
345} 345}
346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush); 346static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
347 347
348#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
349static ssize_t show_multicast_router(struct device *d,
350 struct device_attribute *attr, char *buf)
351{
352 struct net_bridge *br = to_bridge(d);
353 return sprintf(buf, "%d\n", br->multicast_router);
354}
355
356static ssize_t store_multicast_router(struct device *d,
357 struct device_attribute *attr,
358 const char *buf, size_t len)
359{
360 return store_bridge_parm(d, buf, len, br_multicast_set_router);
361}
362static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
363 store_multicast_router);
364
365static ssize_t show_multicast_snooping(struct device *d,
366 struct device_attribute *attr,
367 char *buf)
368{
369 struct net_bridge *br = to_bridge(d);
370 return sprintf(buf, "%d\n", !br->multicast_disabled);
371}
372
373static ssize_t store_multicast_snooping(struct device *d,
374 struct device_attribute *attr,
375 const char *buf, size_t len)
376{
377 return store_bridge_parm(d, buf, len, br_multicast_toggle);
378}
379static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
380 show_multicast_snooping, store_multicast_snooping);
381
382static ssize_t show_hash_elasticity(struct device *d,
383 struct device_attribute *attr, char *buf)
384{
385 struct net_bridge *br = to_bridge(d);
386 return sprintf(buf, "%u\n", br->hash_elasticity);
387}
388
389static int set_elasticity(struct net_bridge *br, unsigned long val)
390{
391 br->hash_elasticity = val;
392 return 0;
393}
394
395static ssize_t store_hash_elasticity(struct device *d,
396 struct device_attribute *attr,
397 const char *buf, size_t len)
398{
399 return store_bridge_parm(d, buf, len, set_elasticity);
400}
401static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
402 store_hash_elasticity);
403
404static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
405 char *buf)
406{
407 struct net_bridge *br = to_bridge(d);
408 return sprintf(buf, "%u\n", br->hash_max);
409}
410
411static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
412 const char *buf, size_t len)
413{
414 return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
415}
416static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
417 store_hash_max);
418
419static ssize_t show_multicast_last_member_count(struct device *d,
420 struct device_attribute *attr,
421 char *buf)
422{
423 struct net_bridge *br = to_bridge(d);
424 return sprintf(buf, "%u\n", br->multicast_last_member_count);
425}
426
427static int set_last_member_count(struct net_bridge *br, unsigned long val)
428{
429 br->multicast_last_member_count = val;
430 return 0;
431}
432
433static ssize_t store_multicast_last_member_count(struct device *d,
434 struct device_attribute *attr,
435 const char *buf, size_t len)
436{
437 return store_bridge_parm(d, buf, len, set_last_member_count);
438}
439static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
440 show_multicast_last_member_count,
441 store_multicast_last_member_count);
442
443static ssize_t show_multicast_startup_query_count(
444 struct device *d, struct device_attribute *attr, char *buf)
445{
446 struct net_bridge *br = to_bridge(d);
447 return sprintf(buf, "%u\n", br->multicast_startup_query_count);
448}
449
450static int set_startup_query_count(struct net_bridge *br, unsigned long val)
451{
452 br->multicast_startup_query_count = val;
453 return 0;
454}
455
456static ssize_t store_multicast_startup_query_count(
457 struct device *d, struct device_attribute *attr, const char *buf,
458 size_t len)
459{
460 return store_bridge_parm(d, buf, len, set_startup_query_count);
461}
462static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
463 show_multicast_startup_query_count,
464 store_multicast_startup_query_count);
465
466static ssize_t show_multicast_last_member_interval(
467 struct device *d, struct device_attribute *attr, char *buf)
468{
469 struct net_bridge *br = to_bridge(d);
470 return sprintf(buf, "%lu\n",
471 jiffies_to_clock_t(br->multicast_last_member_interval));
472}
473
474static int set_last_member_interval(struct net_bridge *br, unsigned long val)
475{
476 br->multicast_last_member_interval = clock_t_to_jiffies(val);
477 return 0;
478}
479
480static ssize_t store_multicast_last_member_interval(
481 struct device *d, struct device_attribute *attr, const char *buf,
482 size_t len)
483{
484 return store_bridge_parm(d, buf, len, set_last_member_interval);
485}
486static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
487 show_multicast_last_member_interval,
488 store_multicast_last_member_interval);
489
490static ssize_t show_multicast_membership_interval(
491 struct device *d, struct device_attribute *attr, char *buf)
492{
493 struct net_bridge *br = to_bridge(d);
494 return sprintf(buf, "%lu\n",
495 jiffies_to_clock_t(br->multicast_membership_interval));
496}
497
498static int set_membership_interval(struct net_bridge *br, unsigned long val)
499{
500 br->multicast_membership_interval = clock_t_to_jiffies(val);
501 return 0;
502}
503
504static ssize_t store_multicast_membership_interval(
505 struct device *d, struct device_attribute *attr, const char *buf,
506 size_t len)
507{
508 return store_bridge_parm(d, buf, len, set_membership_interval);
509}
510static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
511 show_multicast_membership_interval,
512 store_multicast_membership_interval);
513
514static ssize_t show_multicast_querier_interval(struct device *d,
515 struct device_attribute *attr,
516 char *buf)
517{
518 struct net_bridge *br = to_bridge(d);
519 return sprintf(buf, "%lu\n",
520 jiffies_to_clock_t(br->multicast_querier_interval));
521}
522
523static int set_querier_interval(struct net_bridge *br, unsigned long val)
524{
525 br->multicast_querier_interval = clock_t_to_jiffies(val);
526 return 0;
527}
528
529static ssize_t store_multicast_querier_interval(struct device *d,
530 struct device_attribute *attr,
531 const char *buf, size_t len)
532{
533 return store_bridge_parm(d, buf, len, set_querier_interval);
534}
535static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
536 show_multicast_querier_interval,
537 store_multicast_querier_interval);
538
539static ssize_t show_multicast_query_interval(struct device *d,
540 struct device_attribute *attr,
541 char *buf)
542{
543 struct net_bridge *br = to_bridge(d);
544 return sprintf(buf, "%lu\n",
545 jiffies_to_clock_t(br->multicast_query_interval));
546}
547
548static int set_query_interval(struct net_bridge *br, unsigned long val)
549{
550 br->multicast_query_interval = clock_t_to_jiffies(val);
551 return 0;
552}
553
554static ssize_t store_multicast_query_interval(struct device *d,
555 struct device_attribute *attr,
556 const char *buf, size_t len)
557{
558 return store_bridge_parm(d, buf, len, set_query_interval);
559}
560static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
561 show_multicast_query_interval,
562 store_multicast_query_interval);
563
564static ssize_t show_multicast_query_response_interval(
565 struct device *d, struct device_attribute *attr, char *buf)
566{
567 struct net_bridge *br = to_bridge(d);
568 return sprintf(
569 buf, "%lu\n",
570 jiffies_to_clock_t(br->multicast_query_response_interval));
571}
572
573static int set_query_response_interval(struct net_bridge *br, unsigned long val)
574{
575 br->multicast_query_response_interval = clock_t_to_jiffies(val);
576 return 0;
577}
578
579static ssize_t store_multicast_query_response_interval(
580 struct device *d, struct device_attribute *attr, const char *buf,
581 size_t len)
582{
583 return store_bridge_parm(d, buf, len, set_query_response_interval);
584}
585static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
586 show_multicast_query_response_interval,
587 store_multicast_query_response_interval);
588
589static ssize_t show_multicast_startup_query_interval(
590 struct device *d, struct device_attribute *attr, char *buf)
591{
592 struct net_bridge *br = to_bridge(d);
593 return sprintf(
594 buf, "%lu\n",
595 jiffies_to_clock_t(br->multicast_startup_query_interval));
596}
597
598static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
599{
600 br->multicast_startup_query_interval = clock_t_to_jiffies(val);
601 return 0;
602}
603
604static ssize_t store_multicast_startup_query_interval(
605 struct device *d, struct device_attribute *attr, const char *buf,
606 size_t len)
607{
608 return store_bridge_parm(d, buf, len, set_startup_query_interval);
609}
610static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
611 show_multicast_startup_query_interval,
612 store_multicast_startup_query_interval);
613#endif
614
348static struct attribute *bridge_attrs[] = { 615static struct attribute *bridge_attrs[] = {
349 &dev_attr_forward_delay.attr, 616 &dev_attr_forward_delay.attr,
350 &dev_attr_hello_time.attr, 617 &dev_attr_hello_time.attr,
@@ -364,6 +631,20 @@ static struct attribute *bridge_attrs[] = {
364 &dev_attr_gc_timer.attr, 631 &dev_attr_gc_timer.attr,
365 &dev_attr_group_addr.attr, 632 &dev_attr_group_addr.attr,
366 &dev_attr_flush.attr, 633 &dev_attr_flush.attr,
634#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
635 &dev_attr_multicast_router.attr,
636 &dev_attr_multicast_snooping.attr,
637 &dev_attr_hash_elasticity.attr,
638 &dev_attr_hash_max.attr,
639 &dev_attr_multicast_last_member_count.attr,
640 &dev_attr_multicast_startup_query_count.attr,
641 &dev_attr_multicast_last_member_interval.attr,
642 &dev_attr_multicast_membership_interval.attr,
643 &dev_attr_multicast_querier_interval.attr,
644 &dev_attr_multicast_query_interval.attr,
645 &dev_attr_multicast_query_response_interval.attr,
646 &dev_attr_multicast_startup_query_interval.attr,
647#endif
367 NULL 648 NULL
368}; 649};
369 650
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 820643a3ba9c..0b9916489d6b 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -159,6 +159,21 @@ static ssize_t store_hairpin_mode(struct net_bridge_port *p, unsigned long v)
159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR, 159static BRPORT_ATTR(hairpin_mode, S_IRUGO | S_IWUSR,
160 show_hairpin_mode, store_hairpin_mode); 160 show_hairpin_mode, store_hairpin_mode);
161 161
162#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
163static ssize_t show_multicast_router(struct net_bridge_port *p, char *buf)
164{
165 return sprintf(buf, "%d\n", p->multicast_router);
166}
167
168static ssize_t store_multicast_router(struct net_bridge_port *p,
169 unsigned long v)
170{
171 return br_multicast_set_port_router(p, v);
172}
173static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
174 store_multicast_router);
175#endif
176
162static struct brport_attribute *brport_attrs[] = { 177static struct brport_attribute *brport_attrs[] = {
163 &brport_attr_path_cost, 178 &brport_attr_path_cost,
164 &brport_attr_priority, 179 &brport_attr_priority,
@@ -176,6 +191,9 @@ static struct brport_attribute *brport_attrs[] = {
176 &brport_attr_hold_timer, 191 &brport_attr_hold_timer,
177 &brport_attr_flush, 192 &brport_attr_flush,
178 &brport_attr_hairpin_mode, 193 &brport_attr_hairpin_mode,
194#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
195 &brport_attr_multicast_router,
196#endif
179 NULL 197 NULL
180}; 198};
181 199
@@ -220,7 +238,7 @@ static ssize_t brport_store(struct kobject * kobj,
220 return ret; 238 return ret;
221} 239}
222 240
223struct sysfs_ops brport_sysfs_ops = { 241const struct sysfs_ops brport_sysfs_ops = {
224 .show = brport_show, 242 .show = brport_show,
225 .store = brport_store, 243 .store = brport_store,
226}; 244};
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index bd91dc58d49b..5d1176758ca5 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -52,7 +52,7 @@ static struct xt_match ebt_802_3_mt_reg __read_mostly = {
52 .family = NFPROTO_BRIDGE, 52 .family = NFPROTO_BRIDGE,
53 .match = ebt_802_3_mt, 53 .match = ebt_802_3_mt,
54 .checkentry = ebt_802_3_mt_check, 54 .checkentry = ebt_802_3_mt_check,
55 .matchsize = XT_ALIGN(sizeof(struct ebt_802_3_info)), 55 .matchsize = sizeof(struct ebt_802_3_info),
56 .me = THIS_MODULE, 56 .me = THIS_MODULE,
57}; 57};
58 58
diff --git a/net/bridge/netfilter/ebt_arp.c b/net/bridge/netfilter/ebt_arp.c
index b7ad60419f9a..e727697c5847 100644
--- a/net/bridge/netfilter/ebt_arp.c
+++ b/net/bridge/netfilter/ebt_arp.c
@@ -120,7 +120,7 @@ static struct xt_match ebt_arp_mt_reg __read_mostly = {
120 .family = NFPROTO_BRIDGE, 120 .family = NFPROTO_BRIDGE,
121 .match = ebt_arp_mt, 121 .match = ebt_arp_mt,
122 .checkentry = ebt_arp_mt_check, 122 .checkentry = ebt_arp_mt_check,
123 .matchsize = XT_ALIGN(sizeof(struct ebt_arp_info)), 123 .matchsize = sizeof(struct ebt_arp_info),
124 .me = THIS_MODULE, 124 .me = THIS_MODULE,
125}; 125};
126 126
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 76584cd72e57..f392e9d93f53 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -78,7 +78,7 @@ static struct xt_target ebt_arpreply_tg_reg __read_mostly = {
78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING), 78 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_PRE_ROUTING),
79 .target = ebt_arpreply_tg, 79 .target = ebt_arpreply_tg,
80 .checkentry = ebt_arpreply_tg_check, 80 .checkentry = ebt_arpreply_tg_check,
81 .targetsize = XT_ALIGN(sizeof(struct ebt_arpreply_info)), 81 .targetsize = sizeof(struct ebt_arpreply_info),
82 .me = THIS_MODULE, 82 .me = THIS_MODULE,
83}; 83};
84 84
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index 6b49ea9e31fb..2bb40d728a35 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -54,7 +54,7 @@ static struct xt_target ebt_dnat_tg_reg __read_mostly = {
54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING), 54 (1 << NF_BR_LOCAL_OUT) | (1 << NF_BR_BROUTING),
55 .target = ebt_dnat_tg, 55 .target = ebt_dnat_tg,
56 .checkentry = ebt_dnat_tg_check, 56 .checkentry = ebt_dnat_tg_check,
57 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 57 .targetsize = sizeof(struct ebt_nat_info),
58 .me = THIS_MODULE, 58 .me = THIS_MODULE,
59}; 59};
60 60
diff --git a/net/bridge/netfilter/ebt_ip.c b/net/bridge/netfilter/ebt_ip.c
index d771bbfbcbe6..5de6df6f86b8 100644
--- a/net/bridge/netfilter/ebt_ip.c
+++ b/net/bridge/netfilter/ebt_ip.c
@@ -110,7 +110,7 @@ static struct xt_match ebt_ip_mt_reg __read_mostly = {
110 .family = NFPROTO_BRIDGE, 110 .family = NFPROTO_BRIDGE,
111 .match = ebt_ip_mt, 111 .match = ebt_ip_mt,
112 .checkentry = ebt_ip_mt_check, 112 .checkentry = ebt_ip_mt_check,
113 .matchsize = XT_ALIGN(sizeof(struct ebt_ip_info)), 113 .matchsize = sizeof(struct ebt_ip_info),
114 .me = THIS_MODULE, 114 .me = THIS_MODULE,
115}; 115};
116 116
diff --git a/net/bridge/netfilter/ebt_ip6.c b/net/bridge/netfilter/ebt_ip6.c
index 784a6573876c..bbf2534ef026 100644
--- a/net/bridge/netfilter/ebt_ip6.c
+++ b/net/bridge/netfilter/ebt_ip6.c
@@ -122,7 +122,7 @@ static struct xt_match ebt_ip6_mt_reg __read_mostly = {
122 .family = NFPROTO_BRIDGE, 122 .family = NFPROTO_BRIDGE,
123 .match = ebt_ip6_mt, 123 .match = ebt_ip6_mt,
124 .checkentry = ebt_ip6_mt_check, 124 .checkentry = ebt_ip6_mt_check,
125 .matchsize = XT_ALIGN(sizeof(struct ebt_ip6_info)), 125 .matchsize = sizeof(struct ebt_ip6_info),
126 .me = THIS_MODULE, 126 .me = THIS_MODULE,
127}; 127};
128 128
diff --git a/net/bridge/netfilter/ebt_limit.c b/net/bridge/netfilter/ebt_limit.c
index f7bd9192ff0c..7a8182710eb3 100644
--- a/net/bridge/netfilter/ebt_limit.c
+++ b/net/bridge/netfilter/ebt_limit.c
@@ -84,13 +84,29 @@ static bool ebt_limit_mt_check(const struct xt_mtchk_param *par)
84 return true; 84 return true;
85} 85}
86 86
87
88#ifdef CONFIG_COMPAT
89/*
90 * no conversion function needed --
91 * only avg/burst have meaningful values in userspace.
92 */
93struct ebt_compat_limit_info {
94 compat_uint_t avg, burst;
95 compat_ulong_t prev;
96 compat_uint_t credit, credit_cap, cost;
97};
98#endif
99
87static struct xt_match ebt_limit_mt_reg __read_mostly = { 100static struct xt_match ebt_limit_mt_reg __read_mostly = {
88 .name = "limit", 101 .name = "limit",
89 .revision = 0, 102 .revision = 0,
90 .family = NFPROTO_BRIDGE, 103 .family = NFPROTO_BRIDGE,
91 .match = ebt_limit_mt, 104 .match = ebt_limit_mt,
92 .checkentry = ebt_limit_mt_check, 105 .checkentry = ebt_limit_mt_check,
93 .matchsize = XT_ALIGN(sizeof(struct ebt_limit_info)), 106 .matchsize = sizeof(struct ebt_limit_info),
107#ifdef CONFIG_COMPAT
108 .compatsize = sizeof(struct ebt_compat_limit_info),
109#endif
94 .me = THIS_MODULE, 110 .me = THIS_MODULE,
95}; 111};
96 112
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index e4ea3fdd1d41..e873924ddb5d 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -195,7 +195,7 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
195 .family = NFPROTO_BRIDGE, 195 .family = NFPROTO_BRIDGE,
196 .target = ebt_log_tg, 196 .target = ebt_log_tg,
197 .checkentry = ebt_log_tg_check, 197 .checkentry = ebt_log_tg_check,
198 .targetsize = XT_ALIGN(sizeof(struct ebt_log_info)), 198 .targetsize = sizeof(struct ebt_log_info),
199 .me = THIS_MODULE, 199 .me = THIS_MODULE,
200}; 200};
201 201
diff --git a/net/bridge/netfilter/ebt_mark.c b/net/bridge/netfilter/ebt_mark.c
index 2fee7e8e2e93..2b5ce533d6b9 100644
--- a/net/bridge/netfilter/ebt_mark.c
+++ b/net/bridge/netfilter/ebt_mark.c
@@ -52,6 +52,32 @@ static bool ebt_mark_tg_check(const struct xt_tgchk_param *par)
52 return false; 52 return false;
53 return true; 53 return true;
54} 54}
55#ifdef CONFIG_COMPAT
56struct compat_ebt_mark_t_info {
57 compat_ulong_t mark;
58 compat_uint_t target;
59};
60
61static void mark_tg_compat_from_user(void *dst, const void *src)
62{
63 const struct compat_ebt_mark_t_info *user = src;
64 struct ebt_mark_t_info *kern = dst;
65
66 kern->mark = user->mark;
67 kern->target = user->target;
68}
69
70static int mark_tg_compat_to_user(void __user *dst, const void *src)
71{
72 struct compat_ebt_mark_t_info __user *user = dst;
73 const struct ebt_mark_t_info *kern = src;
74
75 if (put_user(kern->mark, &user->mark) ||
76 put_user(kern->target, &user->target))
77 return -EFAULT;
78 return 0;
79}
80#endif
55 81
56static struct xt_target ebt_mark_tg_reg __read_mostly = { 82static struct xt_target ebt_mark_tg_reg __read_mostly = {
57 .name = "mark", 83 .name = "mark",
@@ -59,7 +85,12 @@ static struct xt_target ebt_mark_tg_reg __read_mostly = {
59 .family = NFPROTO_BRIDGE, 85 .family = NFPROTO_BRIDGE,
60 .target = ebt_mark_tg, 86 .target = ebt_mark_tg,
61 .checkentry = ebt_mark_tg_check, 87 .checkentry = ebt_mark_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_mark_t_info)), 88 .targetsize = sizeof(struct ebt_mark_t_info),
89#ifdef CONFIG_COMPAT
90 .compatsize = sizeof(struct compat_ebt_mark_t_info),
91 .compat_from_user = mark_tg_compat_from_user,
92 .compat_to_user = mark_tg_compat_to_user,
93#endif
63 .me = THIS_MODULE, 94 .me = THIS_MODULE,
64}; 95};
65 96
diff --git a/net/bridge/netfilter/ebt_mark_m.c b/net/bridge/netfilter/ebt_mark_m.c
index ea570f214b1d..8de8c396d913 100644
--- a/net/bridge/netfilter/ebt_mark_m.c
+++ b/net/bridge/netfilter/ebt_mark_m.c
@@ -35,13 +35,50 @@ static bool ebt_mark_mt_check(const struct xt_mtchk_param *par)
35 return true; 35 return true;
36} 36}
37 37
38
39#ifdef CONFIG_COMPAT
40struct compat_ebt_mark_m_info {
41 compat_ulong_t mark, mask;
42 uint8_t invert, bitmask;
43};
44
45static void mark_mt_compat_from_user(void *dst, const void *src)
46{
47 const struct compat_ebt_mark_m_info *user = src;
48 struct ebt_mark_m_info *kern = dst;
49
50 kern->mark = user->mark;
51 kern->mask = user->mask;
52 kern->invert = user->invert;
53 kern->bitmask = user->bitmask;
54}
55
56static int mark_mt_compat_to_user(void __user *dst, const void *src)
57{
58 struct compat_ebt_mark_m_info __user *user = dst;
59 const struct ebt_mark_m_info *kern = src;
60
61 if (put_user(kern->mark, &user->mark) ||
62 put_user(kern->mask, &user->mask) ||
63 put_user(kern->invert, &user->invert) ||
64 put_user(kern->bitmask, &user->bitmask))
65 return -EFAULT;
66 return 0;
67}
68#endif
69
38static struct xt_match ebt_mark_mt_reg __read_mostly = { 70static struct xt_match ebt_mark_mt_reg __read_mostly = {
39 .name = "mark_m", 71 .name = "mark_m",
40 .revision = 0, 72 .revision = 0,
41 .family = NFPROTO_BRIDGE, 73 .family = NFPROTO_BRIDGE,
42 .match = ebt_mark_mt, 74 .match = ebt_mark_mt,
43 .checkentry = ebt_mark_mt_check, 75 .checkentry = ebt_mark_mt_check,
44 .matchsize = XT_ALIGN(sizeof(struct ebt_mark_m_info)), 76 .matchsize = sizeof(struct ebt_mark_m_info),
77#ifdef CONFIG_COMPAT
78 .compatsize = sizeof(struct compat_ebt_mark_m_info),
79 .compat_from_user = mark_mt_compat_from_user,
80 .compat_to_user = mark_mt_compat_to_user,
81#endif
45 .me = THIS_MODULE, 82 .me = THIS_MODULE,
46}; 83};
47 84
diff --git a/net/bridge/netfilter/ebt_nflog.c b/net/bridge/netfilter/ebt_nflog.c
index 2a63d996dd4e..40dbd248b9ae 100644
--- a/net/bridge/netfilter/ebt_nflog.c
+++ b/net/bridge/netfilter/ebt_nflog.c
@@ -51,7 +51,7 @@ static struct xt_target ebt_nflog_tg_reg __read_mostly = {
51 .family = NFPROTO_BRIDGE, 51 .family = NFPROTO_BRIDGE,
52 .target = ebt_nflog_tg, 52 .target = ebt_nflog_tg,
53 .checkentry = ebt_nflog_tg_check, 53 .checkentry = ebt_nflog_tg_check,
54 .targetsize = XT_ALIGN(sizeof(struct ebt_nflog_info)), 54 .targetsize = sizeof(struct ebt_nflog_info),
55 .me = THIS_MODULE, 55 .me = THIS_MODULE,
56}; 56};
57 57
diff --git a/net/bridge/netfilter/ebt_pkttype.c b/net/bridge/netfilter/ebt_pkttype.c
index 883e96e2a542..e2a07e6cbef3 100644
--- a/net/bridge/netfilter/ebt_pkttype.c
+++ b/net/bridge/netfilter/ebt_pkttype.c
@@ -36,7 +36,7 @@ static struct xt_match ebt_pkttype_mt_reg __read_mostly = {
36 .family = NFPROTO_BRIDGE, 36 .family = NFPROTO_BRIDGE,
37 .match = ebt_pkttype_mt, 37 .match = ebt_pkttype_mt,
38 .checkentry = ebt_pkttype_mt_check, 38 .checkentry = ebt_pkttype_mt_check,
39 .matchsize = XT_ALIGN(sizeof(struct ebt_pkttype_info)), 39 .matchsize = sizeof(struct ebt_pkttype_info),
40 .me = THIS_MODULE, 40 .me = THIS_MODULE,
41}; 41};
42 42
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index c8a49f7a57ba..9be8fbcd370b 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -59,7 +59,7 @@ static struct xt_target ebt_redirect_tg_reg __read_mostly = {
59 (1 << NF_BR_BROUTING), 59 (1 << NF_BR_BROUTING),
60 .target = ebt_redirect_tg, 60 .target = ebt_redirect_tg,
61 .checkentry = ebt_redirect_tg_check, 61 .checkentry = ebt_redirect_tg_check,
62 .targetsize = XT_ALIGN(sizeof(struct ebt_redirect_info)), 62 .targetsize = sizeof(struct ebt_redirect_info),
63 .me = THIS_MODULE, 63 .me = THIS_MODULE,
64}; 64};
65 65
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 8d04d4c302bd..9c7b520765a2 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -67,7 +67,7 @@ static struct xt_target ebt_snat_tg_reg __read_mostly = {
67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING), 67 .hooks = (1 << NF_BR_NUMHOOKS) | (1 << NF_BR_POST_ROUTING),
68 .target = ebt_snat_tg, 68 .target = ebt_snat_tg,
69 .checkentry = ebt_snat_tg_check, 69 .checkentry = ebt_snat_tg_check,
70 .targetsize = XT_ALIGN(sizeof(struct ebt_nat_info)), 70 .targetsize = sizeof(struct ebt_nat_info),
71 .me = THIS_MODULE, 71 .me = THIS_MODULE,
72}; 72};
73 73
diff --git a/net/bridge/netfilter/ebt_stp.c b/net/bridge/netfilter/ebt_stp.c
index 48527e621626..92a93d363765 100644
--- a/net/bridge/netfilter/ebt_stp.c
+++ b/net/bridge/netfilter/ebt_stp.c
@@ -135,8 +135,8 @@ ebt_stp_mt(const struct sk_buff *skb, const struct xt_match_param *par)
135 if (memcmp(sp, header, sizeof(header))) 135 if (memcmp(sp, header, sizeof(header)))
136 return false; 136 return false;
137 137
138 if (info->bitmask & EBT_STP_TYPE 138 if (info->bitmask & EBT_STP_TYPE &&
139 && FWINV(info->type != sp->type, EBT_STP_TYPE)) 139 FWINV(info->type != sp->type, EBT_STP_TYPE))
140 return false; 140 return false;
141 141
142 if (sp->type == BPDU_TYPE_CONFIG && 142 if (sp->type == BPDU_TYPE_CONFIG &&
@@ -177,7 +177,7 @@ static struct xt_match ebt_stp_mt_reg __read_mostly = {
177 .family = NFPROTO_BRIDGE, 177 .family = NFPROTO_BRIDGE,
178 .match = ebt_stp_mt, 178 .match = ebt_stp_mt,
179 .checkentry = ebt_stp_mt_check, 179 .checkentry = ebt_stp_mt_check,
180 .matchsize = XT_ALIGN(sizeof(struct ebt_stp_info)), 180 .matchsize = sizeof(struct ebt_stp_info),
181 .me = THIS_MODULE, 181 .me = THIS_MODULE,
182}; 182};
183 183
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index ce50688a6431..f9560f3dbdc7 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -29,6 +29,7 @@
29 */ 29 */
30 30
31#include <linux/module.h> 31#include <linux/module.h>
32#include <linux/slab.h>
32#include <linux/spinlock.h> 33#include <linux/spinlock.h>
33#include <linux/socket.h> 34#include <linux/socket.h>
34#include <linux/skbuff.h> 35#include <linux/skbuff.h>
@@ -275,7 +276,7 @@ static struct xt_target ebt_ulog_tg_reg __read_mostly = {
275 .family = NFPROTO_BRIDGE, 276 .family = NFPROTO_BRIDGE,
276 .target = ebt_ulog_tg, 277 .target = ebt_ulog_tg,
277 .checkentry = ebt_ulog_tg_check, 278 .checkentry = ebt_ulog_tg_check,
278 .targetsize = XT_ALIGN(sizeof(struct ebt_ulog_info)), 279 .targetsize = sizeof(struct ebt_ulog_info),
279 .me = THIS_MODULE, 280 .me = THIS_MODULE,
280}; 281};
281 282
diff --git a/net/bridge/netfilter/ebt_vlan.c b/net/bridge/netfilter/ebt_vlan.c
index 3dddd489328e..be1dd2e1f615 100644
--- a/net/bridge/netfilter/ebt_vlan.c
+++ b/net/bridge/netfilter/ebt_vlan.c
@@ -163,7 +163,7 @@ static struct xt_match ebt_vlan_mt_reg __read_mostly = {
163 .family = NFPROTO_BRIDGE, 163 .family = NFPROTO_BRIDGE,
164 .match = ebt_vlan_mt, 164 .match = ebt_vlan_mt,
165 .checkentry = ebt_vlan_mt_check, 165 .checkentry = ebt_vlan_mt_check,
166 .matchsize = XT_ALIGN(sizeof(struct ebt_vlan_info)), 166 .matchsize = sizeof(struct ebt_vlan_info),
167 .me = THIS_MODULE, 167 .me = THIS_MODULE,
168}; 168};
169 169
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index d32ab13e728c..ae3f106c3908 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -71,7 +71,7 @@ static int __net_init broute_net_init(struct net *net)
71 71
72static void __net_exit broute_net_exit(struct net *net) 72static void __net_exit broute_net_exit(struct net *net)
73{ 73{
74 ebt_unregister_table(net->xt.broute_table); 74 ebt_unregister_table(net, net->xt.broute_table);
75} 75}
76 76
77static struct pernet_operations broute_net_ops = { 77static struct pernet_operations broute_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c
index 60b1a6ca7185..42e6bd094574 100644
--- a/net/bridge/netfilter/ebtable_filter.c
+++ b/net/bridge/netfilter/ebtable_filter.c
@@ -107,7 +107,7 @@ static int __net_init frame_filter_net_init(struct net *net)
107 107
108static void __net_exit frame_filter_net_exit(struct net *net) 108static void __net_exit frame_filter_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_filter); 110 ebt_unregister_table(net, net->xt.frame_filter);
111} 111}
112 112
113static struct pernet_operations frame_filter_net_ops = { 113static struct pernet_operations frame_filter_net_ops = {
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c
index 4a98804203b0..6dc2f878ae05 100644
--- a/net/bridge/netfilter/ebtable_nat.c
+++ b/net/bridge/netfilter/ebtable_nat.c
@@ -107,7 +107,7 @@ static int __net_init frame_nat_net_init(struct net *net)
107 107
108static void __net_exit frame_nat_net_exit(struct net *net) 108static void __net_exit frame_nat_net_exit(struct net *net)
109{ 109{
110 ebt_unregister_table(net->xt.frame_nat); 110 ebt_unregister_table(net, net->xt.frame_nat);
111} 111}
112 112
113static struct pernet_operations frame_nat_net_ops = { 113static struct pernet_operations frame_nat_net_ops = {
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index bd1c65425d4f..f0865fd1e3ec 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -23,6 +23,7 @@
23#include <linux/netfilter_bridge/ebtables.h> 23#include <linux/netfilter_bridge/ebtables.h>
24#include <linux/spinlock.h> 24#include <linux/spinlock.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/slab.h>
26#include <asm/uaccess.h> 27#include <asm/uaccess.h>
27#include <linux/smp.h> 28#include <linux/smp.h>
28#include <linux/cpumask.h> 29#include <linux/cpumask.h>
@@ -33,11 +34,6 @@
33#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\ 34#define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 "report to author: "format, ## args) 35 "report to author: "format, ## args)
35/* #define BUGPRINT(format, args...) */ 36/* #define BUGPRINT(format, args...) */
36#define MEMPRINT(format, args...) printk("kernel msg: ebtables "\
37 ": out of memory: "format, ## args)
38/* #define MEMPRINT(format, args...) */
39
40
41 37
42/* 38/*
43 * Each cpu has its own set of counters, so there is no need for write_lock in 39 * Each cpu has its own set of counters, so there is no need for write_lock in
@@ -56,11 +52,37 @@
56 52
57static DEFINE_MUTEX(ebt_mutex); 53static DEFINE_MUTEX(ebt_mutex);
58 54
55#ifdef CONFIG_COMPAT
56static void ebt_standard_compat_from_user(void *dst, const void *src)
57{
58 int v = *(compat_int_t *)src;
59
60 if (v >= 0)
61 v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
62 memcpy(dst, &v, sizeof(v));
63}
64
65static int ebt_standard_compat_to_user(void __user *dst, const void *src)
66{
67 compat_int_t cv = *(int *)src;
68
69 if (cv >= 0)
70 cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
71 return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
72}
73#endif
74
75
59static struct xt_target ebt_standard_target = { 76static struct xt_target ebt_standard_target = {
60 .name = "standard", 77 .name = "standard",
61 .revision = 0, 78 .revision = 0,
62 .family = NFPROTO_BRIDGE, 79 .family = NFPROTO_BRIDGE,
63 .targetsize = sizeof(int), 80 .targetsize = sizeof(int),
81#ifdef CONFIG_COMPAT
82 .compatsize = sizeof(compat_int_t),
83 .compat_from_user = ebt_standard_compat_from_user,
84 .compat_to_user = ebt_standard_compat_to_user,
85#endif
64}; 86};
65 87
66static inline int 88static inline int
@@ -82,7 +104,8 @@ static inline int ebt_do_match (struct ebt_entry_match *m,
82 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH; 104 return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
83} 105}
84 106
85static inline int ebt_dev_check(char *entry, const struct net_device *device) 107static inline int
108ebt_dev_check(const char *entry, const struct net_device *device)
86{ 109{
87 int i = 0; 110 int i = 0;
88 const char *devname; 111 const char *devname;
@@ -100,8 +123,9 @@ static inline int ebt_dev_check(char *entry, const struct net_device *device)
100 123
101#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg)) 124#define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
102/* process standard matches */ 125/* process standard matches */
103static inline int ebt_basic_match(struct ebt_entry *e, struct ethhdr *h, 126static inline int
104 const struct net_device *in, const struct net_device *out) 127ebt_basic_match(const struct ebt_entry *e, const struct ethhdr *h,
128 const struct net_device *in, const struct net_device *out)
105{ 129{
106 int verdict, i; 130 int verdict, i;
107 131
@@ -156,12 +180,12 @@ unsigned int ebt_do_table (unsigned int hook, struct sk_buff *skb,
156 int i, nentries; 180 int i, nentries;
157 struct ebt_entry *point; 181 struct ebt_entry *point;
158 struct ebt_counter *counter_base, *cb_base; 182 struct ebt_counter *counter_base, *cb_base;
159 struct ebt_entry_target *t; 183 const struct ebt_entry_target *t;
160 int verdict, sp = 0; 184 int verdict, sp = 0;
161 struct ebt_chainstack *cs; 185 struct ebt_chainstack *cs;
162 struct ebt_entries *chaininfo; 186 struct ebt_entries *chaininfo;
163 char *base; 187 const char *base;
164 struct ebt_table_info *private; 188 const struct ebt_table_info *private;
165 bool hotdrop = false; 189 bool hotdrop = false;
166 struct xt_match_param mtpar; 190 struct xt_match_param mtpar;
167 struct xt_target_param tgpar; 191 struct xt_target_param tgpar;
@@ -395,7 +419,7 @@ ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
395 return 0; 419 return 0;
396} 420}
397 421
398static int ebt_verify_pointers(struct ebt_replace *repl, 422static int ebt_verify_pointers(const struct ebt_replace *repl,
399 struct ebt_table_info *newinfo) 423 struct ebt_table_info *newinfo)
400{ 424{
401 unsigned int limit = repl->entries_size; 425 unsigned int limit = repl->entries_size;
@@ -442,6 +466,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
442 break; 466 break;
443 if (left < e->next_offset) 467 if (left < e->next_offset)
444 break; 468 break;
469 if (e->next_offset < sizeof(struct ebt_entry))
470 return -EINVAL;
445 offset += e->next_offset; 471 offset += e->next_offset;
446 } 472 }
447 } 473 }
@@ -466,8 +492,8 @@ static int ebt_verify_pointers(struct ebt_replace *repl,
466 * to parse the userspace data 492 * to parse the userspace data
467 */ 493 */
468static inline int 494static inline int
469ebt_check_entry_size_and_hooks(struct ebt_entry *e, 495ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
470 struct ebt_table_info *newinfo, 496 const struct ebt_table_info *newinfo,
471 unsigned int *n, unsigned int *cnt, 497 unsigned int *n, unsigned int *cnt,
472 unsigned int *totalcnt, unsigned int *udc_cnt) 498 unsigned int *totalcnt, unsigned int *udc_cnt)
473{ 499{
@@ -561,13 +587,14 @@ ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
561} 587}
562 588
563static inline int 589static inline int
564ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i) 590ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
565{ 591{
566 struct xt_mtdtor_param par; 592 struct xt_mtdtor_param par;
567 593
568 if (i && (*i)-- == 0) 594 if (i && (*i)-- == 0)
569 return 1; 595 return 1;
570 596
597 par.net = net;
571 par.match = m->u.match; 598 par.match = m->u.match;
572 par.matchinfo = m->data; 599 par.matchinfo = m->data;
573 par.family = NFPROTO_BRIDGE; 600 par.family = NFPROTO_BRIDGE;
@@ -578,13 +605,14 @@ ebt_cleanup_match(struct ebt_entry_match *m, unsigned int *i)
578} 605}
579 606
580static inline int 607static inline int
581ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i) 608ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
582{ 609{
583 struct xt_tgdtor_param par; 610 struct xt_tgdtor_param par;
584 611
585 if (i && (*i)-- == 0) 612 if (i && (*i)-- == 0)
586 return 1; 613 return 1;
587 614
615 par.net = net;
588 par.target = w->u.watcher; 616 par.target = w->u.watcher;
589 par.targinfo = w->data; 617 par.targinfo = w->data;
590 par.family = NFPROTO_BRIDGE; 618 par.family = NFPROTO_BRIDGE;
@@ -595,7 +623,7 @@ ebt_cleanup_watcher(struct ebt_entry_watcher *w, unsigned int *i)
595} 623}
596 624
597static inline int 625static inline int
598ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt) 626ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
599{ 627{
600 struct xt_tgdtor_param par; 628 struct xt_tgdtor_param par;
601 struct ebt_entry_target *t; 629 struct ebt_entry_target *t;
@@ -605,10 +633,11 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
605 /* we're done */ 633 /* we're done */
606 if (cnt && (*cnt)-- == 0) 634 if (cnt && (*cnt)-- == 0)
607 return 1; 635 return 1;
608 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, NULL); 636 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
609 EBT_MATCH_ITERATE(e, ebt_cleanup_match, NULL); 637 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
610 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset); 638 t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
611 639
640 par.net = net;
612 par.target = t->u.target; 641 par.target = t->u.target;
613 par.targinfo = t->data; 642 par.targinfo = t->data;
614 par.family = NFPROTO_BRIDGE; 643 par.family = NFPROTO_BRIDGE;
@@ -619,7 +648,8 @@ ebt_cleanup_entry(struct ebt_entry *e, unsigned int *cnt)
619} 648}
620 649
621static inline int 650static inline int
622ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo, 651ebt_check_entry(struct ebt_entry *e, struct net *net,
652 const struct ebt_table_info *newinfo,
623 const char *name, unsigned int *cnt, 653 const char *name, unsigned int *cnt,
624 struct ebt_cl_stack *cl_s, unsigned int udc_cnt) 654 struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
625{ 655{
@@ -671,6 +701,7 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
671 } 701 }
672 i = 0; 702 i = 0;
673 703
704 mtpar.net = tgpar.net = net;
674 mtpar.table = tgpar.table = name; 705 mtpar.table = tgpar.table = name;
675 mtpar.entryinfo = tgpar.entryinfo = e; 706 mtpar.entryinfo = tgpar.entryinfo = e;
676 mtpar.hook_mask = tgpar.hook_mask = hookmask; 707 mtpar.hook_mask = tgpar.hook_mask = hookmask;
@@ -726,9 +757,9 @@ ebt_check_entry(struct ebt_entry *e, struct ebt_table_info *newinfo,
726 (*cnt)++; 757 (*cnt)++;
727 return 0; 758 return 0;
728cleanup_watchers: 759cleanup_watchers:
729 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, &j); 760 EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
730cleanup_matches: 761cleanup_matches:
731 EBT_MATCH_ITERATE(e, ebt_cleanup_match, &i); 762 EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
732 return ret; 763 return ret;
733} 764}
734 765
@@ -737,12 +768,12 @@ cleanup_matches:
737 * the hook mask for udc tells us from which base chains the udc can be 768 * the hook mask for udc tells us from which base chains the udc can be
738 * accessed. This mask is a parameter to the check() functions of the extensions 769 * accessed. This mask is a parameter to the check() functions of the extensions
739 */ 770 */
740static int check_chainloops(struct ebt_entries *chain, struct ebt_cl_stack *cl_s, 771static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
741 unsigned int udc_cnt, unsigned int hooknr, char *base) 772 unsigned int udc_cnt, unsigned int hooknr, char *base)
742{ 773{
743 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict; 774 int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
744 struct ebt_entry *e = (struct ebt_entry *)chain->data; 775 const struct ebt_entry *e = (struct ebt_entry *)chain->data;
745 struct ebt_entry_target *t; 776 const struct ebt_entry_target *t;
746 777
747 while (pos < nentries || chain_nr != -1) { 778 while (pos < nentries || chain_nr != -1) {
748 /* end of udc, go back one 'recursion' step */ 779 /* end of udc, go back one 'recursion' step */
@@ -808,7 +839,8 @@ letscontinue:
808} 839}
809 840
810/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */ 841/* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
811static int translate_table(char *name, struct ebt_table_info *newinfo) 842static int translate_table(struct net *net, const char *name,
843 struct ebt_table_info *newinfo)
812{ 844{
813 unsigned int i, j, k, udc_cnt; 845 unsigned int i, j, k, udc_cnt;
814 int ret; 846 int ret;
@@ -917,17 +949,17 @@ static int translate_table(char *name, struct ebt_table_info *newinfo)
917 /* used to know what we need to clean up if something goes wrong */ 949 /* used to know what we need to clean up if something goes wrong */
918 i = 0; 950 i = 0;
919 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 951 ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
920 ebt_check_entry, newinfo, name, &i, cl_s, udc_cnt); 952 ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
921 if (ret != 0) { 953 if (ret != 0) {
922 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 954 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
923 ebt_cleanup_entry, &i); 955 ebt_cleanup_entry, net, &i);
924 } 956 }
925 vfree(cl_s); 957 vfree(cl_s);
926 return ret; 958 return ret;
927} 959}
928 960
929/* called under write_lock */ 961/* called under write_lock */
930static void get_counters(struct ebt_counter *oldcounters, 962static void get_counters(const struct ebt_counter *oldcounters,
931 struct ebt_counter *counters, unsigned int nentries) 963 struct ebt_counter *counters, unsigned int nentries)
932{ 964{
933 int i, cpu; 965 int i, cpu;
@@ -949,90 +981,45 @@ static void get_counters(struct ebt_counter *oldcounters,
949 } 981 }
950} 982}
951 983
952/* replace the table */ 984static int do_replace_finish(struct net *net, struct ebt_replace *repl,
953static int do_replace(struct net *net, void __user *user, unsigned int len) 985 struct ebt_table_info *newinfo)
954{ 986{
955 int ret, i, countersize; 987 int ret, i;
956 struct ebt_table_info *newinfo;
957 struct ebt_replace tmp;
958 struct ebt_table *t;
959 struct ebt_counter *counterstmp = NULL; 988 struct ebt_counter *counterstmp = NULL;
960 /* used to be able to unlock earlier */ 989 /* used to be able to unlock earlier */
961 struct ebt_table_info *table; 990 struct ebt_table_info *table;
962 991 struct ebt_table *t;
963 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
964 return -EFAULT;
965
966 if (len != sizeof(tmp) + tmp.entries_size) {
967 BUGPRINT("Wrong len argument\n");
968 return -EINVAL;
969 }
970
971 if (tmp.entries_size == 0) {
972 BUGPRINT("Entries_size never zero\n");
973 return -EINVAL;
974 }
975 /* overflow check */
976 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) / NR_CPUS -
977 SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
978 return -ENOMEM;
979 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
980 return -ENOMEM;
981
982 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
983 newinfo = vmalloc(sizeof(*newinfo) + countersize);
984 if (!newinfo)
985 return -ENOMEM;
986
987 if (countersize)
988 memset(newinfo->counters, 0, countersize);
989
990 newinfo->entries = vmalloc(tmp.entries_size);
991 if (!newinfo->entries) {
992 ret = -ENOMEM;
993 goto free_newinfo;
994 }
995 if (copy_from_user(
996 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
997 BUGPRINT("Couldn't copy entries from userspace\n");
998 ret = -EFAULT;
999 goto free_entries;
1000 }
1001 992
1002 /* the user wants counters back 993 /* the user wants counters back
1003 the check on the size is done later, when we have the lock */ 994 the check on the size is done later, when we have the lock */
1004 if (tmp.num_counters) { 995 if (repl->num_counters) {
1005 counterstmp = vmalloc(tmp.num_counters * sizeof(*counterstmp)); 996 unsigned long size = repl->num_counters * sizeof(*counterstmp);
1006 if (!counterstmp) { 997 counterstmp = vmalloc(size);
1007 ret = -ENOMEM; 998 if (!counterstmp)
1008 goto free_entries; 999 return -ENOMEM;
1009 }
1010 } 1000 }
1011 else
1012 counterstmp = NULL;
1013 1001
1014 /* this can get initialized by translate_table() */
1015 newinfo->chainstack = NULL; 1002 newinfo->chainstack = NULL;
1016 ret = ebt_verify_pointers(&tmp, newinfo); 1003 ret = ebt_verify_pointers(repl, newinfo);
1017 if (ret != 0) 1004 if (ret != 0)
1018 goto free_counterstmp; 1005 goto free_counterstmp;
1019 1006
1020 ret = translate_table(tmp.name, newinfo); 1007 ret = translate_table(net, repl->name, newinfo);
1021 1008
1022 if (ret != 0) 1009 if (ret != 0)
1023 goto free_counterstmp; 1010 goto free_counterstmp;
1024 1011
1025 t = find_table_lock(net, tmp.name, &ret, &ebt_mutex); 1012 t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1026 if (!t) { 1013 if (!t) {
1027 ret = -ENOENT; 1014 ret = -ENOENT;
1028 goto free_iterate; 1015 goto free_iterate;
1029 } 1016 }
1030 1017
1031 /* the table doesn't like it */ 1018 /* the table doesn't like it */
1032 if (t->check && (ret = t->check(newinfo, tmp.valid_hooks))) 1019 if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1033 goto free_unlock; 1020 goto free_unlock;
1034 1021
1035 if (tmp.num_counters && tmp.num_counters != t->private->nentries) { 1022 if (repl->num_counters && repl->num_counters != t->private->nentries) {
1036 BUGPRINT("Wrong nr. of counters requested\n"); 1023 BUGPRINT("Wrong nr. of counters requested\n");
1037 ret = -EINVAL; 1024 ret = -EINVAL;
1038 goto free_unlock; 1025 goto free_unlock;
@@ -1048,7 +1035,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1048 module_put(t->me); 1035 module_put(t->me);
1049 /* we need an atomic snapshot of the counters */ 1036 /* we need an atomic snapshot of the counters */
1050 write_lock_bh(&t->lock); 1037 write_lock_bh(&t->lock);
1051 if (tmp.num_counters) 1038 if (repl->num_counters)
1052 get_counters(t->private->counters, counterstmp, 1039 get_counters(t->private->counters, counterstmp,
1053 t->private->nentries); 1040 t->private->nentries);
1054 1041
@@ -1059,10 +1046,9 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1059 allocation. Only reason why this is done is because this way the lock 1046 allocation. Only reason why this is done is because this way the lock
1060 is held only once, while this doesn't bring the kernel into a 1047 is held only once, while this doesn't bring the kernel into a
1061 dangerous state. */ 1048 dangerous state. */
1062 if (tmp.num_counters && 1049 if (repl->num_counters &&
1063 copy_to_user(tmp.counters, counterstmp, 1050 copy_to_user(repl->counters, counterstmp,
1064 tmp.num_counters * sizeof(struct ebt_counter))) { 1051 repl->num_counters * sizeof(struct ebt_counter))) {
1065 BUGPRINT("Couldn't copy counters to userspace\n");
1066 ret = -EFAULT; 1052 ret = -EFAULT;
1067 } 1053 }
1068 else 1054 else
@@ -1070,7 +1056,7 @@ static int do_replace(struct net *net, void __user *user, unsigned int len)
1070 1056
1071 /* decrease module count and free resources */ 1057 /* decrease module count and free resources */
1072 EBT_ENTRY_ITERATE(table->entries, table->entries_size, 1058 EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1073 ebt_cleanup_entry, NULL); 1059 ebt_cleanup_entry, net, NULL);
1074 1060
1075 vfree(table->entries); 1061 vfree(table->entries);
1076 if (table->chainstack) { 1062 if (table->chainstack) {
@@ -1087,7 +1073,7 @@ free_unlock:
1087 mutex_unlock(&ebt_mutex); 1073 mutex_unlock(&ebt_mutex);
1088free_iterate: 1074free_iterate:
1089 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size, 1075 EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1090 ebt_cleanup_entry, NULL); 1076 ebt_cleanup_entry, net, NULL);
1091free_counterstmp: 1077free_counterstmp:
1092 vfree(counterstmp); 1078 vfree(counterstmp);
1093 /* can be initialized in translate_table() */ 1079 /* can be initialized in translate_table() */
@@ -1096,6 +1082,59 @@ free_counterstmp:
1096 vfree(newinfo->chainstack[i]); 1082 vfree(newinfo->chainstack[i]);
1097 vfree(newinfo->chainstack); 1083 vfree(newinfo->chainstack);
1098 } 1084 }
1085 return ret;
1086}
1087
1088/* replace the table */
1089static int do_replace(struct net *net, const void __user *user,
1090 unsigned int len)
1091{
1092 int ret, countersize;
1093 struct ebt_table_info *newinfo;
1094 struct ebt_replace tmp;
1095
1096 if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1097 return -EFAULT;
1098
1099 if (len != sizeof(tmp) + tmp.entries_size) {
1100 BUGPRINT("Wrong len argument\n");
1101 return -EINVAL;
1102 }
1103
1104 if (tmp.entries_size == 0) {
1105 BUGPRINT("Entries_size never zero\n");
1106 return -EINVAL;
1107 }
1108 /* overflow check */
1109 if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1110 NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1111 return -ENOMEM;
1112 if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1113 return -ENOMEM;
1114
1115 countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1116 newinfo = vmalloc(sizeof(*newinfo) + countersize);
1117 if (!newinfo)
1118 return -ENOMEM;
1119
1120 if (countersize)
1121 memset(newinfo->counters, 0, countersize);
1122
1123 newinfo->entries = vmalloc(tmp.entries_size);
1124 if (!newinfo->entries) {
1125 ret = -ENOMEM;
1126 goto free_newinfo;
1127 }
1128 if (copy_from_user(
1129 newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1130 BUGPRINT("Couldn't copy entries from userspace\n");
1131 ret = -EFAULT;
1132 goto free_entries;
1133 }
1134
1135 ret = do_replace_finish(net, &tmp, newinfo);
1136 if (ret == 0)
1137 return ret;
1099free_entries: 1138free_entries:
1100 vfree(newinfo->entries); 1139 vfree(newinfo->entries);
1101free_newinfo: 1140free_newinfo:
@@ -1154,7 +1193,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table)
1154 newinfo->hook_entry[i] = p + 1193 newinfo->hook_entry[i] = p +
1155 ((char *)repl->hook_entry[i] - repl->entries); 1194 ((char *)repl->hook_entry[i] - repl->entries);
1156 } 1195 }
1157 ret = translate_table(repl->name, newinfo); 1196 ret = translate_table(net, repl->name, newinfo);
1158 if (ret != 0) { 1197 if (ret != 0) {
1159 BUGPRINT("Translate_table failed\n"); 1198 BUGPRINT("Translate_table failed\n");
1160 goto free_chainstack; 1199 goto free_chainstack;
@@ -1204,7 +1243,7 @@ out:
1204 return ERR_PTR(ret); 1243 return ERR_PTR(ret);
1205} 1244}
1206 1245
1207void ebt_unregister_table(struct ebt_table *table) 1246void ebt_unregister_table(struct net *net, struct ebt_table *table)
1208{ 1247{
1209 int i; 1248 int i;
1210 1249
@@ -1216,7 +1255,7 @@ void ebt_unregister_table(struct ebt_table *table)
1216 list_del(&table->list); 1255 list_del(&table->list);
1217 mutex_unlock(&ebt_mutex); 1256 mutex_unlock(&ebt_mutex);
1218 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size, 1257 EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1219 ebt_cleanup_entry, NULL); 1258 ebt_cleanup_entry, net, NULL);
1220 if (table->private->nentries) 1259 if (table->private->nentries)
1221 module_put(table->me); 1260 module_put(table->me);
1222 vfree(table->private->entries); 1261 vfree(table->private->entries);
@@ -1230,39 +1269,33 @@ void ebt_unregister_table(struct ebt_table *table)
1230} 1269}
1231 1270
1232/* userspace just supplied us with counters */ 1271/* userspace just supplied us with counters */
1233static int update_counters(struct net *net, void __user *user, unsigned int len) 1272static int do_update_counters(struct net *net, const char *name,
1273 struct ebt_counter __user *counters,
1274 unsigned int num_counters,
1275 const void __user *user, unsigned int len)
1234{ 1276{
1235 int i, ret; 1277 int i, ret;
1236 struct ebt_counter *tmp; 1278 struct ebt_counter *tmp;
1237 struct ebt_replace hlp;
1238 struct ebt_table *t; 1279 struct ebt_table *t;
1239 1280
1240 if (copy_from_user(&hlp, user, sizeof(hlp))) 1281 if (num_counters == 0)
1241 return -EFAULT;
1242
1243 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1244 return -EINVAL;
1245 if (hlp.num_counters == 0)
1246 return -EINVAL; 1282 return -EINVAL;
1247 1283
1248 if (!(tmp = vmalloc(hlp.num_counters * sizeof(*tmp)))) { 1284 tmp = vmalloc(num_counters * sizeof(*tmp));
1249 MEMPRINT("Update_counters && nomemory\n"); 1285 if (!tmp)
1250 return -ENOMEM; 1286 return -ENOMEM;
1251 }
1252 1287
1253 t = find_table_lock(net, hlp.name, &ret, &ebt_mutex); 1288 t = find_table_lock(net, name, &ret, &ebt_mutex);
1254 if (!t) 1289 if (!t)
1255 goto free_tmp; 1290 goto free_tmp;
1256 1291
1257 if (hlp.num_counters != t->private->nentries) { 1292 if (num_counters != t->private->nentries) {
1258 BUGPRINT("Wrong nr of counters\n"); 1293 BUGPRINT("Wrong nr of counters\n");
1259 ret = -EINVAL; 1294 ret = -EINVAL;
1260 goto unlock_mutex; 1295 goto unlock_mutex;
1261 } 1296 }
1262 1297
1263 if ( copy_from_user(tmp, hlp.counters, 1298 if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1264 hlp.num_counters * sizeof(struct ebt_counter)) ) {
1265 BUGPRINT("Updata_counters && !cfu\n");
1266 ret = -EFAULT; 1299 ret = -EFAULT;
1267 goto unlock_mutex; 1300 goto unlock_mutex;
1268 } 1301 }
@@ -1271,7 +1304,7 @@ static int update_counters(struct net *net, void __user *user, unsigned int len)
1271 write_lock_bh(&t->lock); 1304 write_lock_bh(&t->lock);
1272 1305
1273 /* we add to the counters of the first cpu */ 1306 /* we add to the counters of the first cpu */
1274 for (i = 0; i < hlp.num_counters; i++) { 1307 for (i = 0; i < num_counters; i++) {
1275 t->private->counters[i].pcnt += tmp[i].pcnt; 1308 t->private->counters[i].pcnt += tmp[i].pcnt;
1276 t->private->counters[i].bcnt += tmp[i].bcnt; 1309 t->private->counters[i].bcnt += tmp[i].bcnt;
1277 } 1310 }
@@ -1285,8 +1318,23 @@ free_tmp:
1285 return ret; 1318 return ret;
1286} 1319}
1287 1320
1288static inline int ebt_make_matchname(struct ebt_entry_match *m, 1321static int update_counters(struct net *net, const void __user *user,
1289 char *base, char __user *ubase) 1322 unsigned int len)
1323{
1324 struct ebt_replace hlp;
1325
1326 if (copy_from_user(&hlp, user, sizeof(hlp)))
1327 return -EFAULT;
1328
1329 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1330 return -EINVAL;
1331
1332 return do_update_counters(net, hlp.name, hlp.counters,
1333 hlp.num_counters, user, len);
1334}
1335
1336static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1337 const char *base, char __user *ubase)
1290{ 1338{
1291 char __user *hlp = ubase + ((char *)m - base); 1339 char __user *hlp = ubase + ((char *)m - base);
1292 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN)) 1340 if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1294,8 +1342,8 @@ static inline int ebt_make_matchname(struct ebt_entry_match *m,
1294 return 0; 1342 return 0;
1295} 1343}
1296 1344
1297static inline int ebt_make_watchername(struct ebt_entry_watcher *w, 1345static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1298 char *base, char __user *ubase) 1346 const char *base, char __user *ubase)
1299{ 1347{
1300 char __user *hlp = ubase + ((char *)w - base); 1348 char __user *hlp = ubase + ((char *)w - base);
1301 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN)) 1349 if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
@@ -1303,11 +1351,12 @@ static inline int ebt_make_watchername(struct ebt_entry_watcher *w,
1303 return 0; 1351 return 0;
1304} 1352}
1305 1353
1306static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *ubase) 1354static inline int
1355ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1307{ 1356{
1308 int ret; 1357 int ret;
1309 char __user *hlp; 1358 char __user *hlp;
1310 struct ebt_entry_target *t; 1359 const struct ebt_entry_target *t;
1311 1360
1312 if (e->bitmask == 0) 1361 if (e->bitmask == 0)
1313 return 0; 1362 return 0;
@@ -1326,13 +1375,46 @@ static inline int ebt_make_names(struct ebt_entry *e, char *base, char __user *u
1326 return 0; 1375 return 0;
1327} 1376}
1328 1377
1378static int copy_counters_to_user(struct ebt_table *t,
1379 const struct ebt_counter *oldcounters,
1380 void __user *user, unsigned int num_counters,
1381 unsigned int nentries)
1382{
1383 struct ebt_counter *counterstmp;
1384 int ret = 0;
1385
1386 /* userspace might not need the counters */
1387 if (num_counters == 0)
1388 return 0;
1389
1390 if (num_counters != nentries) {
1391 BUGPRINT("Num_counters wrong\n");
1392 return -EINVAL;
1393 }
1394
1395 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1396 if (!counterstmp)
1397 return -ENOMEM;
1398
1399 write_lock_bh(&t->lock);
1400 get_counters(oldcounters, counterstmp, nentries);
1401 write_unlock_bh(&t->lock);
1402
1403 if (copy_to_user(user, counterstmp,
1404 nentries * sizeof(struct ebt_counter)))
1405 ret = -EFAULT;
1406 vfree(counterstmp);
1407 return ret;
1408}
1409
1329/* called with ebt_mutex locked */ 1410/* called with ebt_mutex locked */
1330static int copy_everything_to_user(struct ebt_table *t, void __user *user, 1411static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1331 int *len, int cmd) 1412 const int *len, int cmd)
1332{ 1413{
1333 struct ebt_replace tmp; 1414 struct ebt_replace tmp;
1334 struct ebt_counter *counterstmp, *oldcounters; 1415 const struct ebt_counter *oldcounters;
1335 unsigned int entries_size, nentries; 1416 unsigned int entries_size, nentries;
1417 int ret;
1336 char *entries; 1418 char *entries;
1337 1419
1338 if (cmd == EBT_SO_GET_ENTRIES) { 1420 if (cmd == EBT_SO_GET_ENTRIES) {
@@ -1347,16 +1429,12 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1347 oldcounters = t->table->counters; 1429 oldcounters = t->table->counters;
1348 } 1430 }
1349 1431
1350 if (copy_from_user(&tmp, user, sizeof(tmp))) { 1432 if (copy_from_user(&tmp, user, sizeof(tmp)))
1351 BUGPRINT("Cfu didn't work\n");
1352 return -EFAULT; 1433 return -EFAULT;
1353 }
1354 1434
1355 if (*len != sizeof(struct ebt_replace) + entries_size + 1435 if (*len != sizeof(struct ebt_replace) + entries_size +
1356 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0)) { 1436 (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1357 BUGPRINT("Wrong size\n");
1358 return -EINVAL; 1437 return -EINVAL;
1359 }
1360 1438
1361 if (tmp.nentries != nentries) { 1439 if (tmp.nentries != nentries) {
1362 BUGPRINT("Nentries wrong\n"); 1440 BUGPRINT("Nentries wrong\n");
@@ -1368,29 +1446,10 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1368 return -EINVAL; 1446 return -EINVAL;
1369 } 1447 }
1370 1448
1371 /* userspace might not need the counters */ 1449 ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1372 if (tmp.num_counters) { 1450 tmp.num_counters, nentries);
1373 if (tmp.num_counters != nentries) { 1451 if (ret)
1374 BUGPRINT("Num_counters wrong\n"); 1452 return ret;
1375 return -EINVAL;
1376 }
1377 counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1378 if (!counterstmp) {
1379 MEMPRINT("Couldn't copy counters, out of memory\n");
1380 return -ENOMEM;
1381 }
1382 write_lock_bh(&t->lock);
1383 get_counters(oldcounters, counterstmp, nentries);
1384 write_unlock_bh(&t->lock);
1385
1386 if (copy_to_user(tmp.counters, counterstmp,
1387 nentries * sizeof(struct ebt_counter))) {
1388 BUGPRINT("Couldn't copy counters to userspace\n");
1389 vfree(counterstmp);
1390 return -EFAULT;
1391 }
1392 vfree(counterstmp);
1393 }
1394 1453
1395 if (copy_to_user(tmp.entries, entries, entries_size)) { 1454 if (copy_to_user(tmp.entries, entries, entries_size)) {
1396 BUGPRINT("Couldn't copy entries to userspace\n"); 1455 BUGPRINT("Couldn't copy entries to userspace\n");
@@ -1406,6 +1465,9 @@ static int do_ebt_set_ctl(struct sock *sk,
1406{ 1465{
1407 int ret; 1466 int ret;
1408 1467
1468 if (!capable(CAP_NET_ADMIN))
1469 return -EPERM;
1470
1409 switch(cmd) { 1471 switch(cmd) {
1410 case EBT_SO_SET_ENTRIES: 1472 case EBT_SO_SET_ENTRIES:
1411 ret = do_replace(sock_net(sk), user, len); 1473 ret = do_replace(sock_net(sk), user, len);
@@ -1415,7 +1477,7 @@ static int do_ebt_set_ctl(struct sock *sk,
1415 break; 1477 break;
1416 default: 1478 default:
1417 ret = -EINVAL; 1479 ret = -EINVAL;
1418 } 1480 }
1419 return ret; 1481 return ret;
1420} 1482}
1421 1483
@@ -1425,6 +1487,9 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1425 struct ebt_replace tmp; 1487 struct ebt_replace tmp;
1426 struct ebt_table *t; 1488 struct ebt_table *t;
1427 1489
1490 if (!capable(CAP_NET_ADMIN))
1491 return -EPERM;
1492
1428 if (copy_from_user(&tmp, user, sizeof(tmp))) 1493 if (copy_from_user(&tmp, user, sizeof(tmp)))
1429 return -EFAULT; 1494 return -EFAULT;
1430 1495
@@ -1472,15 +1537,892 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1472 return ret; 1537 return ret;
1473} 1538}
1474 1539
1540#ifdef CONFIG_COMPAT
/* 32 bit-userspace compatibility definitions.
 *
 * Mirror of struct ebt_replace as seen by a 32 bit binary: all pointers
 * are compat_uptr_t and all integers compat_uint_t, so the field layout
 * matches what 32 bit userland passes through the sockopt interface.
 * Do not reorder fields; the layout is ABI.
 */
struct compat_ebt_replace {
	char name[EBT_TABLE_MAXNAMELEN];
	compat_uint_t valid_hooks;
	compat_uint_t nentries;
	compat_uint_t entries_size;
	/* start of the chains */
	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
	/* nr of counters userspace expects back */
	compat_uint_t num_counters;
	/* where the kernel will put the old counters. */
	compat_uptr_t counters;
	compat_uptr_t entries;
};
1555
/* struct ebt_entry_match, _target and _watcher have same layout,
 * so this single compat mirror covers all three.  ->match_size is the
 * payload size (bytes in ->data) regardless of whether the record is a
 * match, a watcher or a target.
 */
struct compat_ebt_entry_mwt {
	union {
		char name[EBT_FUNCTION_MAXNAMELEN];
		compat_uptr_t ptr;
	} u;
	compat_uint_t match_size;
	compat_uint_t data[0];	/* flexible payload, starts right after match_size */
};
1565
1566/* account for possible padding between match_size and ->data */
1567static int ebt_compat_entry_padsize(void)
1568{
1569 BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1570 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1571 return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1572 COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1573}
1574
/*
 * Size difference (native minus compat) for one match record.
 * Returns how many extra bytes the 64 bit in-kernel representation of
 * this match needs compared to the 32 bit userspace one.
 */
static int ebt_compat_match_offset(const struct xt_match *match,
				   unsigned int userlen)
{
	/*
	 * ebt_among needs special handling. The kernel .matchsize is
	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
	 * value is expected.
	 * Example: userspace sends 4500, ebt_among.c wants 4504.
	 */
	if (unlikely(match->matchsize == -1))
		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
	return xt_compat_match_offset(match);
}
1588
/*
 * Copy one in-kernel match record out to a 32 bit userspace buffer.
 * *dstptr is advanced past the written record; *size is reduced by the
 * bytes saved relative to the native layout (padding + offset), so the
 * caller can recompute userspace-relative offsets afterwards.
 */
static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
				unsigned int *size)
{
	const struct xt_match *match = m->u.match;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = ebt_compat_match_offset(match, m->match_size);
	/* userspace payload is smaller than the kernel one by 'off' bytes */
	compat_uint_t msize = m->match_size - off;

	BUG_ON(off >= m->match_size);

	/* write name and the shrunken size into the compat header */
	if (copy_to_user(cm->u.name, match->name,
	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
		return -EFAULT;

	if (match->compat_to_user) {
		/* extension provides its own 64->32 payload conversion */
		if (match->compat_to_user(cm->data, m->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, m->data, msize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += msize;
	return 0;
}
1614
/*
 * Copy one in-kernel target record out to a 32 bit userspace buffer.
 * Same bookkeeping as compat_match_to_user(): advances *dstptr, shrinks
 * *size by the native-vs-compat layout difference.
 */
static int compat_target_to_user(struct ebt_entry_target *t,
				 void __user **dstptr,
				 unsigned int *size)
{
	const struct xt_target *target = t->u.target;
	struct compat_ebt_entry_mwt __user *cm = *dstptr;
	int off = xt_compat_target_offset(target);
	compat_uint_t tsize = t->target_size - off;

	BUG_ON(off >= t->target_size);

	if (copy_to_user(cm->u.name, target->name,
	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
		return -EFAULT;

	if (target->compat_to_user) {
		/* extension provides its own 64->32 payload conversion */
		if (target->compat_to_user(cm->data, t->data))
			return -EFAULT;
	} else if (copy_to_user(cm->data, t->data, tsize))
		return -EFAULT;

	*size -= ebt_compat_entry_padsize() + off;
	*dstptr = cm->data;
	*dstptr += tsize;
	return 0;
}
1641
1642static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1643 void __user **dstptr,
1644 unsigned int *size)
1645{
1646 return compat_target_to_user((struct ebt_entry_target *)w,
1647 dstptr, size);
1648}
1649
/*
 * Convert one native ebt_entry (or chain-header ebt_entries, when
 * e->bitmask == 0) into the 32 bit layout at *dstptr.
 * The entry's watchers/target/next offsets shrink by however much the
 * matches/watchers/target shrank during conversion; they are patched
 * into the already-copied userspace entry at the end via put_user().
 */
static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
				     unsigned int *size)
{
	struct ebt_entry_target *t;
	struct ebt_entry __user *ce;
	u32 watchers_offset, target_offset, next_offset;
	compat_uint_t origsize;
	int ret;

	if (e->bitmask == 0) {
		/* chain header: same layout in both ABIs, plain copy */
		if (*size < sizeof(struct ebt_entries))
			return -EINVAL;
		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
			return -EFAULT;

		*dstptr += sizeof(struct ebt_entries);
		*size -= sizeof(struct ebt_entries);
		return 0;
	}

	if (*size < sizeof(*ce))
		return -EINVAL;

	ce = (struct ebt_entry __user *)*dstptr;
	if (copy_to_user(ce, e, sizeof(*ce)))
		return -EFAULT;

	origsize = *size;
	*dstptr += sizeof(*ce);

	/* origsize - *size is the total shrinkage accumulated so far */
	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
	if (ret)
		return ret;
	watchers_offset = e->watchers_offset - (origsize - *size);

	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
	if (ret)
		return ret;
	target_offset = e->target_offset - (origsize - *size);

	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);

	ret = compat_target_to_user(t, dstptr, size);
	if (ret)
		return ret;
	next_offset = e->next_offset - (origsize - *size);

	/* fix up the offsets inside the entry already copied above */
	if (put_user(watchers_offset, &ce->watchers_offset) ||
	    put_user(target_offset, &ce->target_offset) ||
	    put_user(next_offset, &ce->next_offset))
		return -EFAULT;

	*size -= sizeof(*ce);
	return 0;
}
1705
1706static int compat_calc_match(struct ebt_entry_match *m, int *off)
1707{
1708 *off += ebt_compat_match_offset(m->u.match, m->match_size);
1709 *off += ebt_compat_entry_padsize();
1710 return 0;
1711}
1712
1713static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1714{
1715 *off += xt_compat_target_offset(w->u.watcher);
1716 *off += ebt_compat_entry_padsize();
1717 return 0;
1718}
1719
/*
 * Compute how much smaller this entry is in the 32 bit layout, shrink
 * newinfo->entries_size accordingly, record the per-entry delta with
 * xt_compat_add_offset(), and pull back any hook entry points that lie
 * after this entry.
 */
static int compat_calc_entry(const struct ebt_entry *e,
			     const struct ebt_table_info *info,
			     const void *base,
			     struct compat_ebt_replace *newinfo)
{
	const struct ebt_entry_target *t;
	unsigned int entry_offset;
	int off, ret, i;

	/* chain headers (bitmask == 0) have identical size in both ABIs */
	if (e->bitmask == 0)
		return 0;

	off = 0;
	entry_offset = (void *)e - base;

	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);

	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);

	off += xt_compat_target_offset(t->u.target);
	off += ebt_compat_entry_padsize();

	newinfo->entries_size -= off;

	/* remember delta so later passes can translate offsets */
	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
	if (ret)
		return ret;

	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		const void *hookptr = info->hook_entry[i];
		/* NOTE(review): '(base - hookptr)' subtracts two pointers
		 * and casts the difference back to a pointer for comparison
		 * with 'e' — presumably "entry precedes this hook's start";
		 * verify against the offset/pointer convention of
		 * hook_entry[] before touching this.
		 */
		if (info->hook_entry[i] &&
		    (e < (struct ebt_entry *)(base - hookptr))) {
			newinfo->hook_entry[i] -= off;
			pr_debug("0x%08X -> 0x%08X\n",
				 newinfo->hook_entry[i] + off,
				 newinfo->hook_entry[i]);
		}
	}

	return 0;
}
1762
1763
1764static int compat_table_info(const struct ebt_table_info *info,
1765 struct compat_ebt_replace *newinfo)
1766{
1767 unsigned int size = info->entries_size;
1768 const void *entries = info->entries;
1769
1770 newinfo->entries_size = size;
1771
1772 return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1773 entries, newinfo);
1774}
1775
/*
 * GET_ENTRIES/GET_INIT_ENTRIES handler for 32 bit callers: validate the
 * user-supplied compat_ebt_replace header, copy out counters (if asked
 * for) and then the rule blob converted to the 32 bit layout.
 * EBT_SO_GET_ENTRIES reads the live table (t->private), otherwise the
 * initial template (t->table) is used.
 */
static int compat_copy_everything_to_user(struct ebt_table *t,
					  void __user *user, int *len, int cmd)
{
	struct compat_ebt_replace repl, tmp;
	struct ebt_counter *oldcounters;
	struct ebt_table_info tinfo;
	int ret;
	void __user *pos;

	memset(&tinfo, 0, sizeof(tinfo));

	if (cmd == EBT_SO_GET_ENTRIES) {
		tinfo.entries_size = t->private->entries_size;
		tinfo.nentries = t->private->nentries;
		tinfo.entries = t->private->entries;
		oldcounters = t->private->counters;
	} else {
		tinfo.entries_size = t->table->entries_size;
		tinfo.nentries = t->table->nentries;
		tinfo.entries = t->table->entries;
		oldcounters = t->table->counters;
	}

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* num_counters == 0 means "caller does not want counters" */
	if (tmp.nentries != tinfo.nentries ||
	    (tmp.num_counters && tmp.num_counters != tinfo.nentries))
		return -EINVAL;

	memcpy(&repl, &tmp, sizeof(repl));
	/* compute the 32 bit blob size into repl.entries_size */
	if (cmd == EBT_SO_GET_ENTRIES)
		ret = compat_table_info(t->private, &repl);
	else
		ret = compat_table_info(&tinfo, &repl);
	if (ret)
		return ret;

	/* caller's buffer must match header + blob + optional counters */
	if (*len != sizeof(tmp) + repl.entries_size +
	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
				*len, tinfo.entries_size, repl.entries_size);
		return -EINVAL;
	}

	/* userspace might not need the counters */
	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
					tmp.num_counters, tinfo.nentries);
	if (ret)
		return ret;

	pos = compat_ptr(tmp.entries);
	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
			compat_copy_entry_to_user, &pos, &tmp.entries_size);
}
1831
/*
 * Bookkeeping for the two-pass 32->64 bit rule blob translation:
 * pass one runs with buf_kern_start == NULL and only counts sizes,
 * pass two copies translated data into the allocated kernel buffer.
 */
struct ebt_entries_buf_state {
	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
	u32 buf_kern_len;	/* total size of kernel buffer */
	u32 buf_kern_offset;	/* amount of data copied so far */
	u32 buf_user_offset;	/* read position in userspace buffer */
};
1838
1839static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1840{
1841 state->buf_kern_offset += sz;
1842 return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1843}
1844
1845static int ebt_buf_add(struct ebt_entries_buf_state *state,
1846 void *data, unsigned int sz)
1847{
1848 if (state->buf_kern_start == NULL)
1849 goto count_only;
1850
1851 BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1852
1853 memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1854
1855 count_only:
1856 state->buf_user_offset += sz;
1857 return ebt_buf_count(state, sz);
1858}
1859
/* append sz zero bytes of kernel-only padding (no userspace counterpart) */
static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
{
	char *b = state->buf_kern_start;

	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);

	if (b != NULL && sz > 0)
		memset(b + state->buf_kern_offset, 0, sz);
	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
	return ebt_buf_count(state, sz);
}
1871
/* which kind of extension record is being translated */
enum compat_mwt {
	EBT_COMPAT_MATCH,
	EBT_COMPAT_WATCHER,
	EBT_COMPAT_TARGET,
};
1877
/*
 * Translate one 32 bit match/watcher/target record into the native
 * layout at the current kernel-buffer position (dst == NULL on the
 * counting pass).  Looks up the extension by name (loading its module
 * if needed), converts or copies the payload, and zero-pads up to the
 * XT_ALIGN'ed kernel size.
 * Returns the record's size growth base (off + match_size) on success,
 * negative errno on failure.
 */
static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
				enum compat_mwt compat_mwt,
				struct ebt_entries_buf_state *state,
				const unsigned char *base)
{
	char name[EBT_FUNCTION_MAXNAMELEN];
	struct xt_match *match;
	struct xt_target *wt;
	void *dst = NULL;
	int off, pad = 0, ret = 0;
	unsigned int size_kern, entry_offset, match_size = mwt->match_size;

	strlcpy(name, mwt->u.name, sizeof(name));

	if (state->buf_kern_start)
		dst = state->buf_kern_start + state->buf_kern_offset;

	entry_offset = (unsigned char *) mwt - base;
	switch (compat_mwt) {
	case EBT_COMPAT_MATCH:
		match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (match == NULL)
			return -ENOENT;
		if (IS_ERR(match))
			return PTR_ERR(match);

		off = ebt_compat_match_offset(match, match_size);
		if (dst) {
			if (match->compat_from_user)
				match->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = match->matchsize;
		/* ebt_among registers with matchsize == -1 (variable size) */
		if (unlikely(size_kern == -1))
			size_kern = match_size;
		module_put(match->me);
		break;
	case EBT_COMPAT_WATCHER: /* fallthrough */
	case EBT_COMPAT_TARGET:
		wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
						name, 0), "ebt_%s", name);
		if (wt == NULL)
			return -ENOENT;
		if (IS_ERR(wt))
			return PTR_ERR(wt);
		off = xt_compat_target_offset(wt);

		if (dst) {
			if (wt->compat_from_user)
				wt->compat_from_user(dst, mwt->data);
			else
				memcpy(dst, mwt->data, match_size);
		}

		size_kern = wt->targetsize;
		module_put(wt->me);
		break;
	}

	if (!dst) {
		/* counting pass: record this record's size delta */
		ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset,
					off + ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;
	}

	state->buf_kern_offset += match_size + off;
	state->buf_user_offset += match_size;
	pad = XT_ALIGN(size_kern) - size_kern;

	if (pad > 0 && dst) {
		BUG_ON(state->buf_kern_len <= pad);
		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
		memset(dst + size_kern, 0, pad);
	}
	return off + match_size;
}
1958
/*
 * return size of all matches, watchers or target, including necessary
 * alignment and padding.
 *
 * Walks the compat records in [match32, match32 + size_left), emitting
 * a native header + padding + translated payload for each (or just
 * counting, on the sizing pass).  Returns the accumulated growth of the
 * blob, negative errno on error.
 * NOTE(review): match32->match_size comes straight from userland and is
 * only checked against size_left; offsets beyond that are trusted here —
 * verify the callers constrain the blob before relying on this.
 */
static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
			unsigned int size_left, enum compat_mwt type,
			struct ebt_entries_buf_state *state, const void *base)
{
	int growth = 0;
	char *buf;

	if (size_left == 0)
		return 0;

	buf = (char *) match32;

	while (size_left >= sizeof(*match32)) {
		struct ebt_entry_match *match_kern;
		int ret;

		/* remember where the native header lands (copy pass only) */
		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
		if (match_kern) {
			char *tmp;
			tmp = state->buf_kern_start + state->buf_kern_offset;
			match_kern = (struct ebt_entry_match *) tmp;
		}
		ret = ebt_buf_add(state, buf, sizeof(*match32));
		if (ret < 0)
			return ret;
		size_left -= sizeof(*match32);

		/* add padding before match->data (if any) */
		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
		if (ret < 0)
			return ret;

		if (match32->match_size > size_left)
			return -EINVAL;

		size_left -= match32->match_size;

		ret = compat_mtw_from_user(match32, type, state, base);
		if (ret < 0)
			return ret;

		BUG_ON(ret < match32->match_size);
		growth += ret - match32->match_size;
		growth += ebt_compat_entry_padsize();

		buf += sizeof(*match32);
		buf += match32->match_size;

		/* patch the now-known native payload size into the header */
		if (match_kern)
			match_kern->match_size = ret;

		/* an entry carries exactly one target record */
		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
		match32 = (struct compat_ebt_entry_mwt *) buf;
	}

	return growth;
}
2020
/*
 * Iterate the compat watcher records between (e)->watchers_offset and
 * (e)->target_offset, invoking fn() on each.  Evaluates to the first
 * non-zero fn() result, or -EINVAL if the records do not end exactly
 * on target_offset.
 *
 * Note: struct compat_ebt_entry_mwt has no 'watcher_size' member;
 * 'match_size' holds the payload size for matches, watchers and
 * targets alike, so it is the correct step here (the original
 * '__watcher->watcher_size' could not compile at any expansion site).
 */
#define EBT_COMPAT_WATCHER_ITERATE(e, fn, args...)          \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__watcher;             \
	                                                    \
	for (__i = (e)->watchers_offset;                    \
	     __i < (e)->target_offset;                      \
	     __i += __watcher->match_size +                 \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__watcher = (void *)(e) + __i;              \
		__ret = fn(__watcher , ## args);            \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->target_offset)              \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2042
/*
 * Iterate the compat match records between the end of struct ebt_entry
 * and (e)->watchers_offset, invoking fn() on each.  Evaluates to the
 * first non-zero fn() result, or -EINVAL if the records do not end
 * exactly on watchers_offset.
 */
#define EBT_COMPAT_MATCH_ITERATE(e, fn, args...)            \
({                                                          \
	unsigned int __i;                                   \
	int __ret = 0;                                      \
	struct compat_ebt_entry_mwt *__match;               \
	                                                    \
	for (__i = sizeof(struct ebt_entry);                \
	     __i < (e)->watchers_offset;                    \
	     __i += __match->match_size +                   \
	     sizeof(struct compat_ebt_entry_mwt)) {         \
		__match = (void *)(e) + __i;                \
		__ret = fn(__match , ## args);              \
		if (__ret != 0)                             \
			break;                              \
	}                                                   \
	if (__ret == 0) {                                   \
		if (__i != (e)->watchers_offset)            \
			__ret = -EINVAL;                    \
	}                                                   \
	__ret;                                              \
})
2064
/* called for all ebt_entry structures.
 *
 * Translates one 32 bit entry (header, matches, watchers, target) into
 * the native layout via the state buffer, patching the entry's four
 * offsets to account for size growth.  *total tracks the unconsumed
 * userspace bytes.
 * NOTE(review): the offsets[] values come from userland and are only
 * checked for monotonicity (offsets[i] > offsets[j]) — bounds against
 * next_offset / *total are presumably enforced by later validation;
 * confirm before reuse.
 */
static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
			  unsigned int *total,
			  struct ebt_entries_buf_state *state)
{
	unsigned int i, j, startoff, new_offset = 0;
	/* stores match/watchers/targets & offset of next struct ebt_entry: */
	unsigned int offsets[4];
	unsigned int *offsets_update = NULL;
	int ret;
	char *buf_start;

	if (*total < sizeof(struct ebt_entries))
		return -EINVAL;

	if (!entry->bitmask) {
		/* chain header: identical layout in both ABIs */
		*total -= sizeof(struct ebt_entries);
		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
	}
	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
		return -EINVAL;

	startoff = state->buf_user_offset;
	/* pull in most part of ebt_entry, it does not need to be changed. */
	ret = ebt_buf_add(state, entry,
			offsetof(struct ebt_entry, watchers_offset));
	if (ret < 0)
		return ret;

	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
	memcpy(&offsets[1], &entry->watchers_offset,
			sizeof(offsets) - sizeof(offsets[0]));

	if (state->buf_kern_start) {
		/* the three offset fields land here; patched in the loop below */
		buf_start = state->buf_kern_start + state->buf_kern_offset;
		offsets_update = (unsigned int *) buf_start;
	}
	ret = ebt_buf_add(state, &offsets[1],
			sizeof(offsets) - sizeof(offsets[0]));
	if (ret < 0)
		return ret;
	buf_start = (char *) entry;
	/*
	 * 0: matches offset, always follows ebt_entry.
	 * 1: watchers offset, from ebt_entry structure
	 * 2: target offset, from ebt_entry structure
	 * 3: next ebt_entry offset, from ebt_entry structure
	 *
	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
	 */
	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
		struct compat_ebt_entry_mwt *match32;
		unsigned int size;
		char *buf = buf_start;

		buf = buf_start + offsets[i];
		if (offsets[i] > offsets[j])
			return -EINVAL;

		match32 = (struct compat_ebt_entry_mwt *) buf;
		size = offsets[j] - offsets[i];
		/* i doubles as the enum compat_mwt section type (0/1/2) */
		ret = ebt_size_mwt(match32, size, i, state, base);
		if (ret < 0)
			return ret;
		new_offset += ret;
		if (offsets_update && new_offset) {
			pr_debug("ebtables: change offset %d to %d\n",
				offsets_update[i], offsets[j] + new_offset);
			offsets_update[i] = offsets[j] + new_offset;
		}
	}

	/* bytes of userspace input consumed by this entry */
	startoff = state->buf_user_offset - startoff;

	BUG_ON(*total < startoff);
	*total -= startoff;
	return 0;
}
2143
/*
 * repl->entries_size is the size of the ebt_entry blob in userspace.
 * It might need more memory when copied to a 64 bit kernel in case
 * userspace is 32-bit. So, first task: find out how much memory is needed.
 *
 * Called before validation is performed.
 *
 * Runs one pass of size_entry_mwt over the whole blob: with
 * state->buf_kern_start == NULL this only counts, with a buffer set it
 * writes the translated entries.  Returns the (translated) blob size.
 */
static int compat_copy_entries(unsigned char *data, unsigned int size_user,
			       struct ebt_entries_buf_state *state)
{
	unsigned int size_remaining = size_user;
	int ret;

	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
					&size_remaining, state);
	if (ret < 0)
		return ret;

	/* a well-formed blob is consumed exactly */
	WARN_ON(size_remaining);
	return state->buf_kern_offset;
}
2165
2166
/*
 * Read a 32 bit ebt_replace header from userspace into the native
 * struct ebt_replace, widening the compat pointers.  Rejects headers
 * whose sizes are inconsistent with len or would overflow later
 * allocation-size arithmetic.
 */
static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
					     void __user *user, unsigned int len)
{
	struct compat_ebt_replace tmp;
	int i;

	if (len < sizeof(tmp))
		return -EINVAL;

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	if (len != sizeof(tmp) + tmp.entries_size)
		return -EINVAL;

	if (tmp.entries_size == 0)
		return -EINVAL;

	/* overflow checks mirroring the native do_replace() limits */
	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
		return -ENOMEM;
	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
		return -ENOMEM;

	/* fields up to hook_entry have identical layout in both ABIs */
	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));

	/* starting with hook_entry, 32 vs. 64 bit structures are different */
	for (i = 0; i < NF_BR_NUMHOOKS; i++)
		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);

	repl->num_counters = tmp.num_counters;
	repl->counters = compat_ptr(tmp.counters);
	repl->entries = compat_ptr(tmp.entries);
	return 0;
}
2202
/*
 * SET_ENTRIES handler for 32 bit callers.  Copies the userspace blob,
 * then runs compat_copy_entries twice under xt_compat_lock: first to
 * size the native blob, then — after allocating it — to fill it.  Hook
 * entry pointers are rebased with the per-entry deltas recorded during
 * the first pass, and the result is handed to do_replace_finish().
 */
static int compat_do_replace(struct net *net, void __user *user,
			     unsigned int len)
{
	int ret, i, countersize, size64;
	struct ebt_table_info *newinfo;
	struct ebt_replace tmp;
	struct ebt_entries_buf_state state;
	void *entries_tmp;

	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
	if (ret) {
		/* try real handler in case userland supplied needed padding */
		if (ret == -EINVAL && do_replace(net, user, len) == 0)
			ret = 0;
		return ret;
	}

	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
	newinfo = vmalloc(sizeof(*newinfo) + countersize);
	if (!newinfo)
		return -ENOMEM;

	if (countersize)
		memset(newinfo->counters, 0, countersize);

	memset(&state, 0, sizeof(state));

	newinfo->entries = vmalloc(tmp.entries_size);
	if (!newinfo->entries) {
		ret = -ENOMEM;
		goto free_newinfo;
	}
	if (copy_from_user(
	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
		ret = -EFAULT;
		goto free_entries;
	}

	entries_tmp = newinfo->entries;

	xt_compat_lock(NFPROTO_BRIDGE);

	/* sizing pass: no kernel buffer yet, returns native blob size */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	if (ret < 0)
		goto out_unlock;

	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));

	size64 = ret;
	newinfo->entries = vmalloc(size64);
	if (!newinfo->entries) {
		vfree(entries_tmp);
		ret = -ENOMEM;
		goto out_unlock;
	}

	memset(&state, 0, sizeof(state));
	state.buf_kern_start = newinfo->entries;
	state.buf_kern_len = size64;

	/* copy pass over the same already-checked data */
	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
	BUG_ON(ret < 0); /* parses same data again */

	vfree(entries_tmp);
	tmp.entries_size = size64;

	/* rebase user hook pointers by the accumulated size deltas */
	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
		char __user *usrptr;
		if (tmp.hook_entry[i]) {
			unsigned int delta;
			usrptr = (char __user *) tmp.hook_entry[i];
			delta = usrptr - tmp.entries;
			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
		}
	}

	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);

	ret = do_replace_finish(net, &tmp, newinfo);
	if (ret == 0)
		return ret;
free_entries:
	vfree(newinfo->entries);
free_newinfo:
	vfree(newinfo);
	return ret;
out_unlock:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	goto free_entries;
}
2298
2299static int compat_update_counters(struct net *net, void __user *user,
2300 unsigned int len)
2301{
2302 struct compat_ebt_replace hlp;
2303
2304 if (copy_from_user(&hlp, user, sizeof(hlp)))
2305 return -EFAULT;
2306
2307 /* try real handler in case userland supplied needed padding */
2308 if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2309 return update_counters(net, user, len);
2310
2311 return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2312 hlp.num_counters, user, len);
2313}
2314
2315static int compat_do_ebt_set_ctl(struct sock *sk,
2316 int cmd, void __user *user, unsigned int len)
2317{
2318 int ret;
2319
2320 if (!capable(CAP_NET_ADMIN))
2321 return -EPERM;
2322
2323 switch (cmd) {
2324 case EBT_SO_SET_ENTRIES:
2325 ret = compat_do_replace(sock_net(sk), user, len);
2326 break;
2327 case EBT_SO_SET_COUNTERS:
2328 ret = compat_update_counters(sock_net(sk), user, len);
2329 break;
2330 default:
2331 ret = -EINVAL;
2332 }
2333 return ret;
2334}
2335
/*
 * compat getsockopt entry point.  For GET_INFO/GET_INIT_INFO the reply
 * is the compat header with entries_size recomputed for the 32 bit
 * layout; for GET_ENTRIES/GET_INIT_ENTRIES the native handler is tried
 * first (covers userland built with 64 bit padding) and the compat
 * conversion used as fallback.
 */
static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
				 void __user *user, int *len)
{
	int ret;
	struct compat_ebt_replace tmp;
	struct ebt_table *t;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	/* try real handler in case userland supplied needed padding */
	if ((cmd == EBT_SO_GET_INFO ||
	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
			return do_ebt_get_ctl(sk, cmd, user, len);

	if (copy_from_user(&tmp, user, sizeof(tmp)))
		return -EFAULT;

	/* on success find_table_lock() returns with ebt_mutex held */
	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
	if (!t)
		return ret;

	xt_compat_lock(NFPROTO_BRIDGE);
	switch (cmd) {
	case EBT_SO_GET_INFO:
		tmp.nentries = t->private->nentries;
		ret = compat_table_info(t->private, &tmp);
		if (ret)
			goto out;
		tmp.valid_hooks = t->valid_hooks;

		/* *len == sizeof(tmp) here, guaranteed by the check above */
		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_INIT_INFO:
		tmp.nentries = t->table->nentries;
		tmp.entries_size = t->table->entries_size;
		tmp.valid_hooks = t->table->valid_hooks;

		if (copy_to_user(user, &tmp, *len) != 0) {
			ret = -EFAULT;
			break;
		}
		ret = 0;
		break;
	case EBT_SO_GET_ENTRIES:
	case EBT_SO_GET_INIT_ENTRIES:
		/*
		 * try real handler first in case of userland-side padding.
		 * in case we are dealing with an 'ordinary' 32 bit binary
		 * without 64bit compatibility padding, this will fail right
		 * after copy_from_user when the *len argument is validated.
		 *
		 * the compat_ variant needs to do one pass over the kernel
		 * data set to adjust for size differences before it the check.
		 */
		if (copy_everything_to_user(t, user, len, cmd) == 0)
			ret = 0;
		else
			ret = compat_copy_everything_to_user(t, user, len, cmd);
		break;
	default:
		ret = -EINVAL;
	}
 out:
	xt_compat_flush_offsets(NFPROTO_BRIDGE);
	xt_compat_unlock(NFPROTO_BRIDGE);
	mutex_unlock(&ebt_mutex);
	return ret;
}
2409#endif
2410
/* sockopt registration: routes EBT_SO_* get/set calls (and their
 * 32 bit compat variants, when CONFIG_COMPAT) to the handlers above.
 */
static struct nf_sockopt_ops ebt_sockopts =
{
	.pf		= PF_INET,
	.set_optmin	= EBT_BASE_CTL,
	.set_optmax	= EBT_SO_SET_MAX + 1,
	.set		= do_ebt_set_ctl,
#ifdef CONFIG_COMPAT
	.compat_set	= compat_do_ebt_set_ctl,
#endif
	.get_optmin	= EBT_BASE_CTL,
	.get_optmax	= EBT_SO_GET_MAX + 1,
	.get		= do_ebt_get_ctl,
#ifdef CONFIG_COMPAT
	.compat_get	= compat_do_ebt_get_ctl,
#endif
	.owner		= THIS_MODULE,
};
1486 2428