path: root/net/core
author		WANG Cong <amwang@redhat.com>	2010-05-06 03:47:21 -0400
committer	David S. Miller <davem@davemloft.net>	2010-05-06 03:47:21 -0400
commit		0e34e93177fb1f642cab080e0bde664c06c7183a (patch)
tree		5353f873ab99c2cff76f12b41e9a9e2018e66b30 /net/core
parent		08259594e047170923ef11d1482648642bfe606f (diff)
netpoll: add generic support for bridge and bonding devices
This whole patchset adds netpoll support to bridge and bonding devices. I have already tested it with bridge, bonding, bridge over bonding, and bonding over bridge; it all looks fine now.

To make bridge and bonding support netpoll, some of the generic netpoll code needs to be adjusted. This patch does the following things:

1) introduce two new priv_flags for struct net_device: IFF_IN_NETPOLL, which indicates that the device is currently being driven by netpoll, and IFF_DISABLE_NETPOLL, which disables netpoll support for a device at run-time;

2) introduce one new method for netdev_ops: ->ndo_netpoll_cleanup(), which is used to clean up netpoll when a device is removed;

3) introduce netpoll_poll_dev(), which takes a struct net_device * parameter; export netpoll_send_skb() and netpoll_poll_dev(), which will be used later;

4) stash a pointer to struct netpoll in struct netpoll_info, which will also be used later;

5) introduce ->real_dev for struct netpoll;

6) introduce a new netdev notifier event, NETDEV_BONDING_DESLAVE, which is used to disable netconsole before releasing a slave, to avoid deadlocks.

Cc: David Miller <davem@davemloft.net>
Cc: Neil Horman <nhorman@tuxdriver.com>
Signed-off-by: WANG Cong <amwang@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
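[Editor's note] For orientation, the sketch below shows how a stacked device might wire up the two hooks this patch introduces. It is illustrative only: the foo_* names and the single lower_dev are invented scaffolding; only netpoll_poll_dev(), ->ndo_netpoll_cleanup and IFF_DISABLE_NETPOLL come from this patchset.

/*
 * Illustrative sketch, not part of the patch: a hypothetical stacked
 * driver "foo" sitting on one lower device, showing where the new
 * netpoll hooks fit.
 */
#include <linux/netdevice.h>
#include <linux/netpoll.h>

struct foo_priv {
	struct net_device *lower_dev;	/* the real device underneath */
};

/* ->ndo_poll_controller: forward the poll to the lower device using the
 * newly exported netpoll_poll_dev(); no struct netpoll handle is needed. */
static void foo_poll_controller(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	netpoll_poll_dev(priv->lower_dev);
}

/* ->ndo_netpoll_cleanup: invoked from netpoll_cleanup() instead of the
 * generic "np->dev->npinfo = NULL" (see the diff below), so a stacked
 * device can also undo whatever it set up on its lower device. */
static void foo_netpoll_cleanup(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);

	priv->lower_dev->npinfo = NULL;	/* assumption: state was shared down */
	dev->npinfo = NULL;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_poll_controller	= foo_poll_controller,
	.ndo_netpoll_cleanup	= foo_netpoll_cleanup,
};

A device that wants netpoll kept off entirely can instead set IFF_DISABLE_NETPOLL in its priv_flags, which makes netpoll_setup() bail out with -ENOTSUPP (see the netpoll_setup() hunk below).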
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/netpoll.c | 26 ++++++++++++++++++++------
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index a58f59b9759..94825b10955 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -179,9 +179,8 @@ static void service_arp_queue(struct netpoll_info *npi)
 	}
 }
 
-void netpoll_poll(struct netpoll *np)
+void netpoll_poll_dev(struct net_device *dev)
 {
-	struct net_device *dev = np->dev;
 	const struct net_device_ops *ops;
 
 	if (!dev || !netif_running(dev))
@@ -201,6 +200,11 @@ void netpoll_poll(struct netpoll *np)
 	zap_completion_queue();
 }
 
+void netpoll_poll(struct netpoll *np)
+{
+	netpoll_poll_dev(np->dev);
+}
+
 static void refill_skbs(void)
 {
 	struct sk_buff *skb;
@@ -282,7 +286,7 @@ static int netpoll_owner_active(struct net_device *dev)
 	return 0;
 }
 
-static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
+void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 {
 	int status = NETDEV_TX_BUSY;
 	unsigned long tries;
@@ -308,7 +312,9 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 		       tries > 0; --tries) {
 			if (__netif_tx_trylock(txq)) {
 				if (!netif_tx_queue_stopped(txq)) {
+					dev->priv_flags |= IFF_IN_NETPOLL;
 					status = ops->ndo_start_xmit(skb, dev);
+					dev->priv_flags &= ~IFF_IN_NETPOLL;
 					if (status == NETDEV_TX_OK)
 						txq_trans_update(txq);
 				}
@@ -756,7 +762,10 @@ int netpoll_setup(struct netpoll *np)
 		atomic_inc(&npinfo->refcnt);
 	}
 
-	if (!ndev->netdev_ops->ndo_poll_controller) {
+	npinfo->netpoll = np;
+
+	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+	    !ndev->netdev_ops->ndo_poll_controller) {
 		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
 		       np->name, np->dev_name);
 		err = -ENOTSUPP;
@@ -878,6 +887,7 @@ void netpoll_cleanup(struct netpoll *np)
 		}
 
 		if (atomic_dec_and_test(&npinfo->refcnt)) {
+			const struct net_device_ops *ops;
 			skb_queue_purge(&npinfo->arp_tx);
 			skb_queue_purge(&npinfo->txq);
 			cancel_rearming_delayed_work(&npinfo->tx_work);
@@ -885,7 +895,11 @@ void netpoll_cleanup(struct netpoll *np)
 			/* clean after last, unfinished work */
 			__skb_queue_purge(&npinfo->txq);
 			kfree(npinfo);
-			np->dev->npinfo = NULL;
+			ops = np->dev->netdev_ops;
+			if (ops->ndo_netpoll_cleanup)
+				ops->ndo_netpoll_cleanup(np->dev);
+			else
+				np->dev->npinfo = NULL;
 		}
 	}
 
@@ -908,6 +922,7 @@ void netpoll_set_trap(int trap)
 	atomic_dec(&trapped);
 }
 
+EXPORT_SYMBOL(netpoll_send_skb);
 EXPORT_SYMBOL(netpoll_set_trap);
 EXPORT_SYMBOL(netpoll_trap);
 EXPORT_SYMBOL(netpoll_print_options);
@@ -915,4 +930,5 @@ EXPORT_SYMBOL(netpoll_parse_options);
 EXPORT_SYMBOL(netpoll_setup);
 EXPORT_SYMBOL(netpoll_cleanup);
 EXPORT_SYMBOL(netpoll_send_udp);
+EXPORT_SYMBOL(netpoll_poll_dev);
 EXPORT_SYMBOL(netpoll_poll);
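[Editor's note] To show how the exported pieces compose on the transmit side: netpoll_send_skb() now brackets ->ndo_start_xmit() with IFF_IN_NETPOLL, so an upper device's xmit routine can detect that it is running under netpoll and push the skb through the lower device's polling path via the struct netpoll pointer stashed in npinfo. The sketch below reuses the invented foo_priv from the earlier sketch and is loosely modeled on what the bonding patch later in this series does; it is an assumption-laden illustration, not code from the patch.

/*
 * Illustrative sketch (invented foo_* names, struct foo_priv as above):
 * a stacked transmit path that stays on the polling path when invoked
 * under netpoll.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);
	struct net_device *lower = priv->lower_dev;

	if (unlikely((dev->priv_flags & IFF_IN_NETPOLL) && dev->npinfo)) {
		/* Called from netpoll_send_skb(): aim the netpoll at the
		 * lower device and let netpoll_send_skb() drive its
		 * ->ndo_start_xmit(). Setting and clearing IFF_IN_NETPOLL
		 * on the lower device happens inside netpoll_send_skb()
		 * itself (see the hunk above). */
		struct netpoll *np = dev->npinfo->netpoll;

		skb->dev = lower;
		lower->npinfo = dev->npinfo;	/* share netpoll state down */
		np->real_dev = np->dev = lower;
		netpoll_send_skb(np, skb);
		np->dev = dev;			/* restore the upper device */
		return NETDEV_TX_OK;
	}

	skb->dev = lower;
	dev_queue_xmit(skb);	/* ordinary transmit outside netpoll */
	return NETDEV_TX_OK;
}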