Diffstat (limited to 'net/bridge/br_device.c'):
 net/bridge/br_device.c | 131 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 126 insertions(+), 5 deletions(-)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..eedf2c94820e 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -13,8 +13,11 @@
 
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
+#include <linux/netpoll.h>
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
+#include <linux/list.h>
+#include <linux/netfilter_bridge.h>
 
 #include <asm/uaccess.h>
 #include "br_private.h"
@@ -26,16 +29,24 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	const unsigned char *dest = skb->data;
 	struct net_bridge_fdb_entry *dst;
 	struct net_bridge_mdb_entry *mdst;
+	struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
 
-	BR_INPUT_SKB_CB(skb)->brdev = dev;
+#ifdef CONFIG_BRIDGE_NETFILTER
+	if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) {
+		br_nf_pre_routing_finish_bridge_slow(skb);
+		return NETDEV_TX_OK;
+	}
+#endif
 
-	dev->stats.tx_packets++;
-	dev->stats.tx_bytes += skb->len;
+	brstats->tx_packets++;
+	brstats->tx_bytes += skb->len;
+
+	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
-	if (dest[0] & 1) {
+	if (is_multicast_ether_addr(dest)) {
 		if (br_multicast_rcv(br, NULL, skb))
 			goto out;
 
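Note on the hunk above: the transmit path now bumps a per-CPU br_cpu_netstats block obtained with this_cpu_ptr(br->stats), so the hot path never writes a shared counter. The allocation of br->stats is not part of this hunk; below is a minimal sketch of what the rest of the series presumably does when the bridge device is created (the helper name and its placement are assumptions, not taken from this diff):

#include <linux/errno.h>
#include <linux/percpu.h>
#include "br_private.h"

/* hypothetical helper; the real allocation likely sits where the bridge
 * device is set up, e.g. in net/bridge/br_if.c */
static int example_alloc_bridge_stats(struct net_bridge *br)
{
	/* one zeroed struct br_cpu_netstats per possible CPU */
	br->stats = alloc_percpu(struct br_cpu_netstats);
	if (!br->stats)
		return -ENOMEM;

	return 0;
}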
@@ -81,6 +92,31 @@ static int br_dev_stop(struct net_device *dev)
 	return 0;
 }
 
+static struct net_device_stats *br_get_stats(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_device_stats *stats = &dev->stats;
+	struct br_cpu_netstats sum = { 0 };
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		const struct br_cpu_netstats *bstats
+			= per_cpu_ptr(br->stats, cpu);
+
+		sum.tx_bytes   += bstats->tx_bytes;
+		sum.tx_packets += bstats->tx_packets;
+		sum.rx_bytes   += bstats->rx_bytes;
+		sum.rx_packets += bstats->rx_packets;
+	}
+
+	stats->tx_bytes   = sum.tx_bytes;
+	stats->tx_packets = sum.tx_packets;
+	stats->rx_bytes   = sum.rx_bytes;
+	stats->rx_packets = sum.rx_packets;
+
+	return stats;
+}
+
 static int br_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -162,6 +198,78 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static bool br_devices_support_netpoll(struct net_bridge *br)
+{
+	struct net_bridge_port *p;
+	bool ret = true;
+	int count = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&br->lock, flags);
+	list_for_each_entry(p, &br->port_list, list) {
+		count++;
+		if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
+		    !p->dev->netdev_ops->ndo_poll_controller)
+			ret = false;
+	}
+	spin_unlock_irqrestore(&br->lock, flags);
+	return count != 0 && ret;
+}
+
+static void br_poll_controller(struct net_device *br_dev)
+{
+	struct netpoll *np = br_dev->npinfo->netpoll;
+
+	if (np->real_dev != br_dev)
+		netpoll_poll_dev(np->real_dev);
+}
+
+void br_netpoll_cleanup(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p, *n;
+	const struct net_device_ops *ops;
+
+	br->dev->npinfo = NULL;
+	list_for_each_entry_safe(p, n, &br->port_list, list) {
+		if (p->dev) {
+			ops = p->dev->netdev_ops;
+			if (ops->ndo_netpoll_cleanup)
+				ops->ndo_netpoll_cleanup(p->dev);
+			else
+				p->dev->npinfo = NULL;
+		}
+	}
+}
+
+void br_netpoll_disable(struct net_bridge *br,
+			struct net_device *dev)
+{
+	if (br_devices_support_netpoll(br))
+		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+	if (dev->netdev_ops->ndo_netpoll_cleanup)
+		dev->netdev_ops->ndo_netpoll_cleanup(dev);
+	else
+		dev->npinfo = NULL;
+}
+
+void br_netpoll_enable(struct net_bridge *br,
+		       struct net_device *dev)
+{
+	if (br_devices_support_netpoll(br)) {
+		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+		if (br->dev->npinfo)
+			dev->npinfo = br->dev->npinfo;
+	} else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+		br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
+		br_info(br,"new device %s does not support netpoll (disabling)",
+			dev->name);
+	}
+}
+
+#endif
+
 static const struct ethtool_ops br_ethtool_ops = {
 	.get_drvinfo	= br_getinfo,
 	.get_link	= ethtool_op_get_link,
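The netpoll helpers above only take effect once they are invoked when ports join or leave the bridge; those call sites are outside this file. A minimal sketch of the assumed wiring, probably in net/bridge/br_if.c (the caller names are hypothetical; only br_netpoll_enable()/br_netpoll_disable() come from this diff):

/* hypothetical caller: run after the new port has been linked into
 * br->port_list, so br_devices_support_netpoll() can see it */
static void example_port_added(struct net_bridge *br, struct net_device *port_dev)
{
	br_netpoll_enable(br, port_dev);
}

/* hypothetical caller: run while a port is being detached, so its npinfo
 * is dropped and the bridge may re-enable netpoll if the remaining ports
 * all still support it */
static void example_port_removed(struct net_bridge *br, struct net_device *port_dev)
{
	br_netpoll_disable(br, port_dev);
}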
@@ -180,19 +288,32 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
 	.ndo_start_xmit		 = br_dev_xmit,
+	.ndo_get_stats		 = br_get_stats,
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
+	.ndo_poll_controller	 = br_poll_controller,
+#endif
 };
 
+static void br_dev_free(struct net_device *dev)
+{
+	struct net_bridge *br = netdev_priv(dev);
+
+	free_percpu(br->stats);
+	free_netdev(dev);
+}
+
 void br_dev_setup(struct net_device *dev)
 {
 	random_ether_addr(dev->dev_addr);
 	ether_setup(dev);
 
 	dev->netdev_ops = &br_netdev_ops;
-	dev->destructor = free_netdev;
+	dev->destructor = br_dev_free;
 	SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
 	dev->tx_queue_len = 0;
 	dev->priv_flags = IFF_EBRIDGE;
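Pointing dev->destructor at br_dev_free() instead of free_netdev() ensures the per-CPU stats block is released before the netdev memory itself. Both br_dev_xmit() and br_get_stats() also rely on declarations this file does not contain; a rough sketch of the companion change the series presumably makes in net/bridge/br_private.h (member layout beyond the four counters used above is an assumption):

/* per-CPU packet/byte counters for the bridge device itself */
struct br_cpu_netstats {
	unsigned long	rx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_packets;
	unsigned long	tx_bytes;
};

struct net_bridge {
	/* ... existing fields ... */
	struct br_cpu_netstats __percpu *stats;	/* freed in br_dev_free() */
	/* ... */
};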