 drivers/net/bonding/bond_main.c |  6
 drivers/net/team/team.c         | 16
 include/linux/netdevice.h       |  8
 include/linux/netpoll.h         | 10
 net/8021q/vlan_dev.c            |  7
 net/bridge/br_device.c          | 15
 net/bridge/br_if.c              |  2
 net/bridge/br_private.h         |  4
 net/core/dev.c                  | 17
 net/core/netpoll.c              | 91
 10 files changed, 91 insertions(+), 85 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5be34b72a048..95a6ca7d9e51 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -922,12 +922,12 @@ static inline int slave_enable_netpoll(struct slave *slave)
 	struct netpoll *np;
 	int err = 0;
 
-	np = kzalloc(sizeof(*np), GFP_ATOMIC);
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!np)
 		goto out;
 
-	err = __netpoll_setup(np, slave->dev, GFP_ATOMIC);
+	err = __netpoll_setup(np, slave->dev);
 	if (err) {
 		kfree(np);
 		goto out;
@@ -962,7 +962,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev)
 		slave_disable_netpoll(slave);
 }
 
-static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp)
+static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
 	struct bonding *bond = netdev_priv(dev);
 	struct list_head *iter;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 2b1a1d61072c..33008c1d1d67 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1031,8 +1031,7 @@ static void team_port_leave(struct team *team, struct team_port *port)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	struct netpoll *np;
 	int err;
@@ -1040,11 +1039,11 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port,
 	if (!team->dev->npinfo)
 		return 0;
 
-	np = kzalloc(sizeof(*np), gfp);
+	np = kzalloc(sizeof(*np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
 
-	err = __netpoll_setup(np, port->dev, gfp);
+	err = __netpoll_setup(np, port->dev);
 	if (err) {
 		kfree(np);
 		return err;
@@ -1067,8 +1066,7 @@ static void team_port_disable_netpoll(struct team_port *port)
 	kfree(np);
 }
 #else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port,
-				    gfp_t gfp)
+static int team_port_enable_netpoll(struct team *team, struct team_port *port)
 {
 	return 0;
 }
@@ -1156,7 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
 		goto err_vids_add;
 	}
 
-	err = team_port_enable_netpoll(team, port, GFP_KERNEL);
+	err = team_port_enable_netpoll(team, port);
 	if (err) {
 		netdev_err(dev, "Failed to enable netpoll on device %s\n",
 			   portname);
@@ -1850,7 +1848,7 @@ static void team_netpoll_cleanup(struct net_device *dev)
 }
 
 static int team_netpoll_setup(struct net_device *dev,
-			      struct netpoll_info *npifo, gfp_t gfp)
+			      struct netpoll_info *npifo)
 {
 	struct team *team = netdev_priv(dev);
 	struct team_port *port;
@@ -1858,7 +1856,7 @@ static int team_netpoll_setup(struct net_device *dev,
 
 	mutex_lock(&team->lock);
 	list_for_each_entry(port, &team->port_list, list) {
-		err = team_port_enable_netpoll(team, port, gfp);
+		err = team_port_enable_netpoll(team, port);
 		if (err) {
 			__team_netpoll_cleanup(team);
 			break;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 159c7e7945f8..4cd5e9e13c87 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1037,8 +1037,7 @@ struct net_device_ops {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	void			(*ndo_poll_controller)(struct net_device *dev);
 	int			(*ndo_netpoll_setup)(struct net_device *dev,
-						     struct netpoll_info *info,
-						     gfp_t gfp);
+						     struct netpoll_info *info);
 	void			(*ndo_netpoll_cleanup)(struct net_device *dev);
 #endif
 #ifdef CONFIG_NET_RX_BUSY_POLL
@@ -2910,6 +2909,11 @@ static inline void netif_tx_unlock_bh(struct net_device *dev)
 	}						\
 }
 
+#define HARD_TX_TRYLOCK(dev, txq)			\
+	(((dev->features & NETIF_F_LLTX) == 0) ?	\
+		__netif_tx_trylock(txq) :		\
+		true )
+
 #define HARD_TX_UNLOCK(dev, txq) {			\
 	if ((dev->features & NETIF_F_LLTX) == 0) {	\
 		__netif_tx_unlock(txq);			\
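
The HARD_TX_TRYLOCK addition above completes the HARD_TX_LOCK/HARD_TX_UNLOCK pair: for a NETIF_F_LLTX device, which does its own TX locking, the trylock degenerates to true, and otherwise it maps to __netif_tx_trylock(). A minimal caller sketch, mirroring how net/core/netpoll.c below uses the macro (try_xmit_once is a hypothetical helper, not part of this patch):

	/* Sketch: opportunistic transmit under the HARD_TX_* helpers.
	 * Returns NETDEV_TX_BUSY when the queue lock or the queue itself
	 * is unavailable, letting the caller retry or requeue the skb.
	 */
	static int try_xmit_once(struct sk_buff *skb, struct net_device *dev,
				 struct netdev_queue *txq)
	{
		int status = NETDEV_TX_BUSY;

		if (HARD_TX_TRYLOCK(dev, txq)) {
			if (!netif_xmit_stopped(txq))
				status = dev->netdev_ops->ndo_start_xmit(skb, dev);
			HARD_TX_UNLOCK(dev, txq);
		}
		return status;
	}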
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 1b475a5a7239..b25ee9ffdbe6 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -47,17 +47,17 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_rx_disable(struct net_device *dev);
-extern void netpoll_rx_enable(struct net_device *dev);
+extern void netpoll_poll_disable(struct net_device *dev);
+extern void netpoll_poll_enable(struct net_device *dev);
 #else
-static inline void netpoll_rx_disable(struct net_device *dev) { return; }
-static inline void netpoll_rx_enable(struct net_device *dev) { return; }
+static inline void netpoll_poll_disable(struct net_device *dev) { return; }
+static inline void netpoll_poll_enable(struct net_device *dev) { return; }
 #endif
 
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp);
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev);
 int netpoll_setup(struct netpoll *np);
 void __netpoll_cleanup(struct netpoll *np);
 void __netpoll_free_async(struct netpoll *np);
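
The rename from netpoll_rx_* to netpoll_poll_* reflects what the pair actually guards: the whole netpoll poll path, not just receive processing. Callers bracket device state changes with it so ndo_poll_controller cannot run concurrently, as net/core/dev.c does below. A condensed sketch of that pattern (quiesce_for_reconfig is hypothetical; both calls can sleep, so this must run in process context):

	/* Sketch: keep netpoll from polling the device while its state
	 * changes, per the pattern in __dev_open()/__dev_close_many().
	 */
	static void quiesce_for_reconfig(struct net_device *dev)
	{
		netpoll_poll_disable(dev);	/* takes ni->dev_lock; may sleep */

		/* ... bring the device up or down ... */

		netpoll_poll_enable(dev);	/* releases ni->dev_lock */
	}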
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 4f3e9073cb49..a78bebeca4d9 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -707,20 +707,19 @@ static void vlan_dev_poll_controller(struct net_device *dev)
 	return;
 }
 
-static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo,
-				  gfp_t gfp)
+static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo)
 {
 	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
 	struct net_device *real_dev = vlan->real_dev;
 	struct netpoll *netpoll;
 	int err = 0;
 
-	netpoll = kzalloc(sizeof(*netpoll), gfp);
+	netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL);
 	err = -ENOMEM;
 	if (!netpoll)
 		goto out;
 
-	err = __netpoll_setup(netpoll, real_dev, gfp);
+	err = __netpoll_setup(netpoll, real_dev);
 	if (err) {
 		kfree(netpoll);
 		goto out;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index f2a08477e0f5..0dd01a05bd59 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -218,16 +218,16 @@ static void br_netpoll_cleanup(struct net_device *dev)
 		br_netpoll_disable(p);
 }
 
-static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static int __br_netpoll_enable(struct net_bridge_port *p)
 {
 	struct netpoll *np;
 	int err;
 
-	np = kzalloc(sizeof(*p->np), gfp);
+	np = kzalloc(sizeof(*p->np), GFP_KERNEL);
 	if (!np)
 		return -ENOMEM;
 
-	err = __netpoll_setup(np, p->dev, gfp);
+	err = __netpoll_setup(np, p->dev);
 	if (err) {
 		kfree(np);
 		return err;
@@ -237,16 +237,15 @@ static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
 	return err;
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+int br_netpoll_enable(struct net_bridge_port *p)
 {
 	if (!p->br->dev->npinfo)
 		return 0;
 
-	return __br_netpoll_enable(p, gfp);
+	return __br_netpoll_enable(p);
 }
 
-static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
-			    gfp_t gfp)
+static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
 {
 	struct net_bridge *br = netdev_priv(dev);
 	struct net_bridge_port *p;
@@ -255,7 +254,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
 	list_for_each_entry(p, &br->port_list, list) {
 		if (!p->dev)
 			continue;
-		err = __br_netpoll_enable(p, gfp);
+		err = __br_netpoll_enable(p);
 		if (err)
 			goto fail;
 	}
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 54d207d3a31c..5262b8617eb9 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -366,7 +366,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
 	if (err)
 		goto err2;
 
-	err = br_netpoll_enable(p, GFP_KERNEL);
+	err = br_netpoll_enable(p);
 	if (err)
 		goto err3;
 
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index e1ca1dc916a4..06811d79f89f 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -349,7 +349,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 	netpoll_send_skb(np, skb);
 }
 
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp);
+int br_netpoll_enable(struct net_bridge_port *p);
 void br_netpoll_disable(struct net_bridge_port *p);
 #else
 static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
@@ -357,7 +357,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p,
 {
 }
 
-static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+static inline int br_netpoll_enable(struct net_bridge_port *p)
 {
 	return 0;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 98ba581b89f0..778b2036a9e7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1245,7 +1245,7 @@ static int __dev_open(struct net_device *dev)
 	 * If we don't do this there is a chance ndo_poll_controller
 	 * or ndo_poll may be running while we open the device
 	 */
-	netpoll_rx_disable(dev);
+	netpoll_poll_disable(dev);
 
 	ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
 	ret = notifier_to_errno(ret);
@@ -1260,7 +1260,7 @@ static int __dev_open(struct net_device *dev)
 	if (!ret && ops->ndo_open)
 		ret = ops->ndo_open(dev);
 
-	netpoll_rx_enable(dev);
+	netpoll_poll_enable(dev);
 
 	if (ret)
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1313,6 +1313,9 @@ static int __dev_close_many(struct list_head *head)
 	might_sleep();
 
 	list_for_each_entry(dev, head, close_list) {
+		/* Temporarily disable netpoll until the interface is down */
+		netpoll_poll_disable(dev);
+
 		call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
 
 		clear_bit(__LINK_STATE_START, &dev->state);
@@ -1343,6 +1346,7 @@ static int __dev_close_many(struct list_head *head)
 
 		dev->flags &= ~IFF_UP;
 		net_dmaengine_put();
+		netpoll_poll_enable(dev);
 	}
 
 	return 0;
@@ -1353,14 +1357,10 @@ static int __dev_close(struct net_device *dev)
 	int retval;
 	LIST_HEAD(single);
 
-	/* Temporarily disable netpoll until the interface is down */
-	netpoll_rx_disable(dev);
-
 	list_add(&dev->close_list, &single);
 	retval = __dev_close_many(&single);
 	list_del(&single);
 
-	netpoll_rx_enable(dev);
 	return retval;
 }
 
@@ -1398,14 +1398,9 @@ int dev_close(struct net_device *dev)
 	if (dev->flags & IFF_UP) {
 		LIST_HEAD(single);
 
-		/* Block netpoll rx while the interface is going down */
-		netpoll_rx_disable(dev);
-
 		list_add(&dev->close_list, &single);
 		dev_close_many(&single);
 		list_del(&single);
-
-		netpoll_rx_enable(dev);
 	}
 	return 0;
 }
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 41c4e9ce1141..ed7740f7a94d 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -69,6 +69,37 @@ module_param(carrier_timeout, uint, 0644);
 #define np_notice(np, fmt, ...)				\
 	pr_notice("%s: " fmt, np->name, ##__VA_ARGS__)
 
+static int netpoll_start_xmit(struct sk_buff *skb, struct net_device *dev,
+			      struct netdev_queue *txq)
+{
+	const struct net_device_ops *ops = dev->netdev_ops;
+	int status = NETDEV_TX_OK;
+	netdev_features_t features;
+
+	features = netif_skb_features(skb);
+
+	if (vlan_tx_tag_present(skb) &&
+	    !vlan_hw_offload_capable(features, skb->vlan_proto)) {
+		skb = __vlan_put_tag(skb, skb->vlan_proto,
+				     vlan_tx_tag_get(skb));
+		if (unlikely(!skb)) {
+			/* This is actually a packet drop, but we
+			 * don't want the code that calls this
+			 * function to try and operate on a NULL skb.
+			 */
+			goto out;
+		}
+		skb->vlan_tci = 0;
+	}
+
+	status = ops->ndo_start_xmit(skb, dev);
+	if (status == NETDEV_TX_OK)
+		txq_trans_update(txq);
+
+out:
+	return status;
+}
+
 static void queue_process(struct work_struct *work)
 {
 	struct netpoll_info *npinfo =
@@ -78,28 +109,27 @@ static void queue_process(struct work_struct *work)
 
 	while ((skb = skb_dequeue(&npinfo->txq))) {
 		struct net_device *dev = skb->dev;
-		const struct net_device_ops *ops = dev->netdev_ops;
 		struct netdev_queue *txq;
 
 		if (!netif_device_present(dev) || !netif_running(dev)) {
-			__kfree_skb(skb);
+			kfree_skb(skb);
 			continue;
 		}
 
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
 
 		local_irq_save(flags);
-		__netif_tx_lock(txq, smp_processor_id());
+		HARD_TX_LOCK(dev, txq, smp_processor_id());
 		if (netif_xmit_frozen_or_stopped(txq) ||
-		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
+		    netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
-			__netif_tx_unlock(txq);
+			HARD_TX_UNLOCK(dev, txq);
 			local_irq_restore(flags);
 
 			schedule_delayed_work(&npinfo->tx_work, HZ/10);
 			return;
 		}
-		__netif_tx_unlock(txq);
+		HARD_TX_UNLOCK(dev, txq);
 		local_irq_restore(flags);
 	}
 }
@@ -184,7 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 	zap_completion_queue();
 }
 
-void netpoll_rx_disable(struct net_device *dev)
+void netpoll_poll_disable(struct net_device *dev)
 {
 	struct netpoll_info *ni;
 	int idx;
@@ -195,9 +225,9 @@ void netpoll_rx_disable(struct net_device *dev)
 	down(&ni->dev_lock);
 	srcu_read_unlock(&netpoll_srcu, idx);
 }
-EXPORT_SYMBOL(netpoll_rx_disable);
+EXPORT_SYMBOL(netpoll_poll_disable);
 
-void netpoll_rx_enable(struct net_device *dev)
+void netpoll_poll_enable(struct net_device *dev)
 {
 	struct netpoll_info *ni;
 	rcu_read_lock();
@@ -206,7 +236,7 @@ void netpoll_rx_enable(struct net_device *dev)
 	up(&ni->dev_lock);
 	rcu_read_unlock();
 }
-EXPORT_SYMBOL(netpoll_rx_enable);
+EXPORT_SYMBOL(netpoll_poll_enable);
 
 static void refill_skbs(void)
 {
@@ -295,7 +325,6 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 {
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
-	const struct net_device_ops *ops = dev->netdev_ops;
 	/* It is up to the caller to keep npinfo alive. */
 	struct netpoll_info *npinfo;
 
@@ -303,7 +332,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
 	npinfo = rcu_dereference_bh(np->dev->npinfo);
 	if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) {
-		__kfree_skb(skb);
+		dev_kfree_skb_irq(skb);
 		return;
 	}
 
@@ -316,29 +345,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 		/* try until next clock tick */
 		for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
 		     tries > 0; --tries) {
-			if (__netif_tx_trylock(txq)) {
-				if (!netif_xmit_stopped(txq)) {
-					if (vlan_tx_tag_present(skb) &&
-					    !vlan_hw_offload_capable(netif_skb_features(skb),
-								     skb->vlan_proto)) {
-						skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
-						if (unlikely(!skb)) {
-							/* This is actually a packet drop, but we
-							 * don't want the code at the end of this
-							 * function to try and re-queue a NULL skb.
-							 */
-							status = NETDEV_TX_OK;
-							goto unlock_txq;
-						}
-						skb->vlan_tci = 0;
-					}
-
-					status = ops->ndo_start_xmit(skb, dev);
-					if (status == NETDEV_TX_OK)
-						txq_trans_update(txq);
-				}
-unlock_txq:
-				__netif_tx_unlock(txq);
+			if (HARD_TX_TRYLOCK(dev, txq)) {
+				if (!netif_xmit_stopped(txq))
+					status = netpoll_start_xmit(skb, dev, txq);
+
+				HARD_TX_UNLOCK(dev, txq);
 
 				if (status == NETDEV_TX_OK)
 					break;
@@ -353,7 +364,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
 
 	WARN_ONCE(!irqs_disabled(),
 		  "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n",
-		  dev->name, ops->ndo_start_xmit);
+		  dev->name, dev->netdev_ops->ndo_start_xmit);
 
 }
 
@@ -584,7 +595,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 }
 EXPORT_SYMBOL(netpoll_parse_options);
 
-int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
+int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
@@ -603,7 +614,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 	}
 
 	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), gfp);
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
 		if (!npinfo) {
 			err = -ENOMEM;
 			goto out;
@@ -617,7 +628,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	ops = np->dev->netdev_ops;
 	if (ops->ndo_netpoll_setup) {
-		err = ops->ndo_netpoll_setup(ndev, npinfo, gfp);
+		err = ops->ndo_netpoll_setup(ndev, npinfo);
 		if (err)
 			goto free_npinfo;
 	}
@@ -749,7 +760,7 @@ int netpoll_setup(struct netpoll *np)
 	/* fill up the skb queue */
 	refill_skbs();
 
-	err = __netpoll_setup(np, ndev, GFP_KERNEL);
+	err = __netpoll_setup(np, ndev);
 	if (err)
 		goto put;
 
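
With the gfp_t plumbing gone, every stacked-device caller in this patch (bonding, team, bridge, vlan) reduces to the same sleeping GFP_KERNEL shape. A condensed sketch of that shared pattern (enable_port_netpoll is a hypothetical name; each driver stores np in its own per-port structure):

	static int enable_port_netpoll(struct net_device *port_dev)
	{
		struct netpoll *np;
		int err;

		/* Setup runs in process context now, so plain GFP_KERNEL
		 * replaces the old caller-supplied gfp argument.
		 */
		np = kzalloc(sizeof(*np), GFP_KERNEL);
		if (!np)
			return -ENOMEM;

		err = __netpoll_setup(np, port_dev);
		if (err) {
			kfree(np);
			return err;
		}
		/* ... stash np in the driver's per-port state ... */
		return 0;
	}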