about summary refs log tree commit diff stats
path: root/net/core/netpoll.c
diff options
context:
space:
mode:
authorGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
committerGlenn Elliott <gelliott@cs.unc.edu>2012-03-04 19:47:13 -0500
commitc71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
treeecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /net/core/netpoll.c
parentea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlpwip-k-fmlp
Conflicts: litmus/sched_cedf.c
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--net/core/netpoll.c60
1 file changed, 25 insertions, 35 deletions
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 537e01afd81b..18d9cbda3a39 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -35,7 +35,6 @@
35 35
36#define MAX_UDP_CHUNK 1460 36#define MAX_UDP_CHUNK 1460
37#define MAX_SKBS 32 37#define MAX_SKBS 32
38#define MAX_QUEUE_DEPTH (MAX_SKBS / 2)
39 38
40static struct sk_buff_head skb_pool; 39static struct sk_buff_head skb_pool;
41 40
@@ -76,8 +75,7 @@ static void queue_process(struct work_struct *work)
76 75
77 local_irq_save(flags); 76 local_irq_save(flags);
78 __netif_tx_lock(txq, smp_processor_id()); 77 __netif_tx_lock(txq, smp_processor_id());
79 if (netif_tx_queue_stopped(txq) || 78 if (netif_tx_queue_frozen_or_stopped(txq) ||
80 netif_tx_queue_frozen(txq) ||
81 ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) { 79 ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
82 skb_queue_head(&npinfo->txq, skb); 80 skb_queue_head(&npinfo->txq, skb);
83 __netif_tx_unlock(txq); 81 __netif_tx_unlock(txq);
@@ -195,6 +193,17 @@ void netpoll_poll_dev(struct net_device *dev)
195 193
196 poll_napi(dev); 194 poll_napi(dev);
197 195
196 if (dev->priv_flags & IFF_SLAVE) {
197 if (dev->npinfo) {
198 struct net_device *bond_dev = dev->master;
199 struct sk_buff *skb;
200 while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) {
201 skb->dev = bond_dev;
202 skb_queue_tail(&bond_dev->npinfo->arp_tx, skb);
203 }
204 }
205 }
206
198 service_arp_queue(dev->npinfo); 207 service_arp_queue(dev->npinfo);
199 208
200 zap_completion_queue(); 209 zap_completion_queue();
@@ -288,11 +297,11 @@ static int netpoll_owner_active(struct net_device *dev)
288 return 0; 297 return 0;
289} 298}
290 299
291void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) 300void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
301 struct net_device *dev)
292{ 302{
293 int status = NETDEV_TX_BUSY; 303 int status = NETDEV_TX_BUSY;
294 unsigned long tries; 304 unsigned long tries;
295 struct net_device *dev = np->dev;
296 const struct net_device_ops *ops = dev->netdev_ops; 305 const struct net_device_ops *ops = dev->netdev_ops;
297 /* It is up to the caller to keep npinfo alive. */ 306 /* It is up to the caller to keep npinfo alive. */
298 struct netpoll_info *npinfo = np->dev->npinfo; 307 struct netpoll_info *npinfo = np->dev->npinfo;
@@ -315,9 +324,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
315 tries > 0; --tries) { 324 tries > 0; --tries) {
316 if (__netif_tx_trylock(txq)) { 325 if (__netif_tx_trylock(txq)) {
317 if (!netif_tx_queue_stopped(txq)) { 326 if (!netif_tx_queue_stopped(txq)) {
318 dev->priv_flags |= IFF_IN_NETPOLL;
319 status = ops->ndo_start_xmit(skb, dev); 327 status = ops->ndo_start_xmit(skb, dev);
320 dev->priv_flags &= ~IFF_IN_NETPOLL;
321 if (status == NETDEV_TX_OK) 328 if (status == NETDEV_TX_OK)
322 txq_trans_update(txq); 329 txq_trans_update(txq);
323 } 330 }
@@ -346,7 +353,7 @@ void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
346 schedule_delayed_work(&npinfo->tx_work,0); 353 schedule_delayed_work(&npinfo->tx_work,0);
347 } 354 }
348} 355}
349EXPORT_SYMBOL(netpoll_send_skb); 356EXPORT_SYMBOL(netpoll_send_skb_on_dev);
350 357
351void netpoll_send_udp(struct netpoll *np, const char *msg, int len) 358void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
352{ 359{
@@ -532,7 +539,7 @@ int __netpoll_rx(struct sk_buff *skb)
532{ 539{
533 int proto, len, ulen; 540 int proto, len, ulen;
534 int hits = 0; 541 int hits = 0;
535 struct iphdr *iph; 542 const struct iphdr *iph;
536 struct udphdr *uh; 543 struct udphdr *uh;
537 struct netpoll_info *npinfo = skb->dev->npinfo; 544 struct netpoll_info *npinfo = skb->dev->npinfo;
538 struct netpoll *np, *tmp; 545 struct netpoll *np, *tmp;
@@ -691,32 +698,8 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
691 698
692 if (*cur != 0) { 699 if (*cur != 0) {
693 /* MAC address */ 700 /* MAC address */
694 if ((delim = strchr(cur, ':')) == NULL) 701 if (!mac_pton(cur, np->remote_mac))
695 goto parse_failed;
696 *delim = 0;
697 np->remote_mac[0] = simple_strtol(cur, NULL, 16);
698 cur = delim + 1;
699 if ((delim = strchr(cur, ':')) == NULL)
700 goto parse_failed;
701 *delim = 0;
702 np->remote_mac[1] = simple_strtol(cur, NULL, 16);
703 cur = delim + 1;
704 if ((delim = strchr(cur, ':')) == NULL)
705 goto parse_failed; 702 goto parse_failed;
706 *delim = 0;
707 np->remote_mac[2] = simple_strtol(cur, NULL, 16);
708 cur = delim + 1;
709 if ((delim = strchr(cur, ':')) == NULL)
710 goto parse_failed;
711 *delim = 0;
712 np->remote_mac[3] = simple_strtol(cur, NULL, 16);
713 cur = delim + 1;
714 if ((delim = strchr(cur, ':')) == NULL)
715 goto parse_failed;
716 *delim = 0;
717 np->remote_mac[4] = simple_strtol(cur, NULL, 16);
718 cur = delim + 1;
719 np->remote_mac[5] = simple_strtol(cur, NULL, 16);
720 } 703 }
721 704
722 netpoll_print_options(np); 705 netpoll_print_options(np);
@@ -809,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
809 return -ENODEV; 792 return -ENODEV;
810 } 793 }
811 794
795 if (ndev->master) {
796 printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
797 np->name, np->dev_name);
798 err = -EBUSY;
799 goto put;
800 }
801
812 if (!netif_running(ndev)) { 802 if (!netif_running(ndev)) {
813 unsigned long atmost, atleast; 803 unsigned long atmost, atleast;
814 804
@@ -925,7 +915,7 @@ void __netpoll_cleanup(struct netpoll *np)
925 915
926 skb_queue_purge(&npinfo->arp_tx); 916 skb_queue_purge(&npinfo->arp_tx);
927 skb_queue_purge(&npinfo->txq); 917 skb_queue_purge(&npinfo->txq);
928 cancel_rearming_delayed_work(&npinfo->tx_work); 918 cancel_delayed_work_sync(&npinfo->tx_work);
929 919
930 /* clean after last, unfinished work */ 920 /* clean after last, unfinished work */
931 __skb_queue_purge(&npinfo->txq); 921 __skb_queue_purge(&npinfo->txq);