path: root/net/bridge/br_device.c
Diffstat (limited to 'net/bridge/br_device.c')
-rw-r--r--	net/bridge/br_device.c	143
 1 file changed, 83 insertions(+), 60 deletions(-)
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index eedf2c94820e..cf09fe591fc2 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -22,7 +22,7 @@
 #include <asm/uaccess.h>
 #include "br_private.h"
 
-/* net device transmit always called with no BH (preempt_disabled) */
+/* net device transmit always called with BH disabled */
 netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_bridge *br = netdev_priv(dev);
@@ -38,17 +38,26 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 #endif
 
+	u64_stats_update_begin(&brstats->syncp);
 	brstats->tx_packets++;
 	brstats->tx_bytes += skb->len;
+	u64_stats_update_end(&brstats->syncp);
 
 	BR_INPUT_SKB_CB(skb)->brdev = dev;
 
 	skb_reset_mac_header(skb);
 	skb_pull(skb, ETH_HLEN);
 
+	rcu_read_lock();
 	if (is_multicast_ether_addr(dest)) {
-		if (br_multicast_rcv(br, NULL, skb))
+		if (unlikely(netpoll_tx_running(dev))) {
+			br_flood_deliver(br, skb);
+			goto out;
+		}
+		if (br_multicast_rcv(br, NULL, skb)) {
+			kfree_skb(skb);
 			goto out;
+		}
 
 		mdst = br_mdb_get(br, skb);
 		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))
@@ -61,6 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 		br_flood_deliver(br, skb);
 
 out:
+	rcu_read_unlock();
 	return NETDEV_TX_OK;
 }
 
@@ -92,21 +102,25 @@ static int br_dev_stop(struct net_device *dev)
 	return 0;
 }
 
-static struct net_device_stats *br_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *br_get_stats64(struct net_device *dev,
+						struct rtnl_link_stats64 *stats)
 {
 	struct net_bridge *br = netdev_priv(dev);
-	struct net_device_stats *stats = &dev->stats;
-	struct br_cpu_netstats sum = { 0 };
+	struct br_cpu_netstats tmp, sum = { 0 };
 	unsigned int cpu;
 
 	for_each_possible_cpu(cpu) {
+		unsigned int start;
 		const struct br_cpu_netstats *bstats
 			= per_cpu_ptr(br->stats, cpu);
-
-		sum.tx_bytes += bstats->tx_bytes;
-		sum.tx_packets += bstats->tx_packets;
-		sum.rx_bytes += bstats->rx_bytes;
-		sum.rx_packets += bstats->rx_packets;
+		do {
+			start = u64_stats_fetch_begin(&bstats->syncp);
+			memcpy(&tmp, bstats, sizeof(tmp));
+		} while (u64_stats_fetch_retry(&bstats->syncp, start));
+		sum.tx_bytes += tmp.tx_bytes;
+		sum.tx_packets += tmp.tx_packets;
+		sum.rx_bytes += tmp.rx_bytes;
+		sum.rx_packets += tmp.rx_packets;
 	}
 
 	stats->tx_bytes = sum.tx_bytes;
@@ -127,7 +141,7 @@ static int br_change_mtu(struct net_device *dev, int new_mtu)
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	/* remember the MTU in the rtable for PMTU */
-	br->fake_rtable.u.dst.metrics[RTAX_MTU - 1] = new_mtu;
+	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
 #endif
 
 	return 0;
@@ -199,73 +213,81 @@ static int br_set_tx_csum(struct net_device *dev, u32 data)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static bool br_devices_support_netpoll(struct net_bridge *br)
+static void br_poll_controller(struct net_device *br_dev)
 {
-	struct net_bridge_port *p;
-	bool ret = true;
-	int count = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(&br->lock, flags);
-	list_for_each_entry(p, &br->port_list, list) {
-		count++;
-		if ((p->dev->priv_flags & IFF_DISABLE_NETPOLL) ||
-		    !p->dev->netdev_ops->ndo_poll_controller)
-			ret = false;
-	}
-	spin_unlock_irqrestore(&br->lock, flags);
-	return count != 0 && ret;
 }
 
-static void br_poll_controller(struct net_device *br_dev)
+static void br_netpoll_cleanup(struct net_device *dev)
 {
-	struct netpoll *np = br_dev->npinfo->netpoll;
+	struct net_bridge *br = netdev_priv(dev);
+	struct net_bridge_port *p, *n;
 
-	if (np->real_dev != br_dev)
-		netpoll_poll_dev(np->real_dev);
+	list_for_each_entry_safe(p, n, &br->port_list, list) {
+		br_netpoll_disable(p);
+	}
 }
 
-void br_netpoll_cleanup(struct net_device *dev)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p, *n;
-	const struct net_device_ops *ops;
+	int err = 0;
 
-	br->dev->npinfo = NULL;
 	list_for_each_entry_safe(p, n, &br->port_list, list) {
-		if (p->dev) {
-			ops = p->dev->netdev_ops;
-			if (ops->ndo_netpoll_cleanup)
-				ops->ndo_netpoll_cleanup(p->dev);
-			else
-				p->dev->npinfo = NULL;
-		}
+		if (!p->dev)
+			continue;
+
+		err = br_netpoll_enable(p);
+		if (err)
+			goto fail;
 	}
+
+out:
+	return err;
+
+fail:
+	br_netpoll_cleanup(dev);
+	goto out;
 }
 
-void br_netpoll_disable(struct net_bridge *br,
-			struct net_device *dev)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
-	if (br_devices_support_netpoll(br))
-		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-	if (dev->netdev_ops->ndo_netpoll_cleanup)
-		dev->netdev_ops->ndo_netpoll_cleanup(dev);
-	else
-		dev->npinfo = NULL;
+	struct netpoll *np;
+	int err = 0;
+
+	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
+	err = -ENOMEM;
+	if (!np)
+		goto out;
+
+	np->dev = p->dev;
+
+	err = __netpoll_setup(np);
+	if (err) {
+		kfree(np);
+		goto out;
+	}
+
+	p->np = np;
+
+out:
+	return err;
 }
 
-void br_netpoll_enable(struct net_bridge *br,
-			struct net_device *dev)
+void br_netpoll_disable(struct net_bridge_port *p)
 {
-	if (br_devices_support_netpoll(br)) {
-		br->dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-		if (br->dev->npinfo)
-			dev->npinfo = br->dev->npinfo;
-	} else if (!(br->dev->priv_flags & IFF_DISABLE_NETPOLL)) {
-		br->dev->priv_flags |= IFF_DISABLE_NETPOLL;
-		br_info(br,"new device %s does not support netpoll (disabling)",
-			dev->name);
-	}
+	struct netpoll *np = p->np;
+
+	if (!np)
+		return;
+
+	p->np = NULL;
+
+	/* Wait for transmitting packets to finish before freeing. */
+	synchronize_rcu_bh();
+
+	__netpoll_cleanup(np);
+	kfree(np);
 }
 
 #endif
@@ -288,12 +310,13 @@ static const struct net_device_ops br_netdev_ops = {
 	.ndo_open		 = br_dev_open,
 	.ndo_stop		 = br_dev_stop,
 	.ndo_start_xmit		 = br_dev_xmit,
-	.ndo_get_stats		 = br_get_stats,
+	.ndo_get_stats64	 = br_get_stats64,
 	.ndo_set_mac_address	 = br_set_mac_address,
 	.ndo_set_multicast_list	 = br_dev_set_multicast_list,
 	.ndo_change_mtu		 = br_change_mtu,
 	.ndo_do_ioctl		 = br_dev_ioctl,
 #ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_netpoll_setup	 = br_netpoll_setup,
 	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
 	.ndo_poll_controller	 = br_poll_controller,
 #endif
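
Note: the statistics side of this diff is the standard u64_stats_sync per-CPU counter pattern: the transmit path bumps its CPU-local counters between u64_stats_update_begin()/u64_stats_update_end(), and the ndo_get_stats64 reader sums every CPU inside a u64_stats_fetch_begin()/u64_stats_fetch_retry() loop so 64-bit counters are read consistently even on 32-bit hosts. The sketch below illustrates that pattern in isolation; the struct and helper names (pcpu_counters, pcpu_counters_add, pcpu_counters_sum) are illustrative only and are not part of this commit.

#include <linux/u64_stats_sync.h>
#include <linux/percpu.h>

struct pcpu_counters {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* writer side (e.g. a transmit path, BH disabled on this CPU) */
static void pcpu_counters_add(struct pcpu_counters __percpu *stats,
			      unsigned int len)
{
	struct pcpu_counters *c = this_cpu_ptr(stats);

	u64_stats_update_begin(&c->syncp);
	c->packets++;
	c->bytes += len;
	u64_stats_update_end(&c->syncp);
}

/* reader side (e.g. ndo_get_stats64); retries if a writer was active */
static void pcpu_counters_sum(struct pcpu_counters __percpu *stats,
			      u64 *packets, u64 *bytes)
{
	unsigned int cpu;

	*packets = 0;
	*bytes = 0;
	for_each_possible_cpu(cpu) {
		const struct pcpu_counters *c = per_cpu_ptr(stats, cpu);
		unsigned int start;
		u64 p, b;

		do {
			start = u64_stats_fetch_begin(&c->syncp);
			p = c->packets;
			b = c->bytes;
		} while (u64_stats_fetch_retry(&c->syncp, start));

		*packets += p;
		*bytes += b;
	}
}

The netpoll half of the diff follows the same idea of moving state to where it is owned: each bridge port keeps its own struct netpoll (p->np), set up via __netpoll_setup() in br_netpoll_enable() and torn down in br_netpoll_disable(), instead of the bridge device probing every port's ndo_poll_controller and toggling IFF_DISABLE_NETPOLL.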