author	Herbert Xu <herbert@gondor.apana.org.au>	2010-06-10 12:12:46 -0400
committer	David S. Miller <davem@davemloft.net>	2010-06-15 13:58:39 -0400
commit	dbaa154178341689faaa08fbf40b94ae5ca1d6c0 (patch)
tree	b50b944ea7ed9b5ee86fd389e6089f30b261e7ae /net/core/netpoll.c
parent	de85d99eb7b595f6751550184b94c1e2f74a828b (diff)
netpoll: Add locking for netpoll_setup/cleanup
As it stands, netpoll_setup and netpoll_cleanup have no locking protection whatsoever. So chaos ensues if two entities try to perform them on the same device.

This patch adds RTNL to the equation. The code has been rearranged so that bits that do not need RTNL protection are now moved to the top of netpoll_setup.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
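To make the race concrete, here is a minimal userspace model of the setup path, with a single pthread mutex standing in for RTNL: two callers of setup() on the same device must not both allocate npinfo, and every failure path must drop the lock. All names here (struct dev, struct npinfo, setup, the rtnl mutex) are illustrative stand-ins, not the kernel code or API. The patch's reordering follows the same rule the sketch does: keep work that needs no serialization (cf. refill_skbs()) outside the critical section.

#include <pthread.h>
#include <stdlib.h>

struct npinfo { int refcnt; };
struct dev    { struct npinfo *npinfo; };

/* one process-wide mutex, standing in for rtnl_lock()/rtnl_unlock() */
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static int setup(struct dev *d)
{
	int err = 0;

	/* work that needs no lock (cf. refill_skbs()) goes here, first */

	pthread_mutex_lock(&rtnl);
	if (!d->npinfo) {
		d->npinfo = calloc(1, sizeof(*d->npinfo));
		if (!d->npinfo) {
			err = -1;	/* cf. err = -ENOMEM; goto unlock; */
			goto unlock;
		}
		d->npinfo->refcnt = 1;	/* first user allocates the shared state */
	} else {
		d->npinfo->refcnt++;	/* later users just take a reference */
	}
unlock:
	pthread_mutex_unlock(&rtnl);	/* every path, success or failure, drops the lock */
	return err;
}

int main(void)
{
	struct dev d = { 0 };

	setup(&d);	/* first caller allocates */
	setup(&d);	/* second caller shares: refcnt becomes 2 */
	return (d.npinfo && d.npinfo->refcnt == 2) ? 0 : 1;
}

Without the mutex, two threads entering setup() concurrently can both observe npinfo == NULL and allocate twice, leaking one npinfo and losing a reference; that is precisely the chaos the commit message describes.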
Diffstat (limited to 'net/core/netpoll.c')
-rw-r--r--	net/core/netpoll.c	151
1 file changed, 76 insertions(+), 75 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e9ab4f0c454..d10c249bcc8 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -698,7 +698,6 @@ int netpoll_setup(struct netpoll *np)
 	struct net_device *ndev = NULL;
 	struct in_device *in_dev;
 	struct netpoll_info *npinfo;
-	struct netpoll *npe, *tmp;
 	unsigned long flags;
 	int err;
 
@@ -710,38 +709,6 @@ int netpoll_setup(struct netpoll *np)
 		return -ENODEV;
 	}
 
-	np->dev = ndev;
-	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-		if (!npinfo) {
-			err = -ENOMEM;
-			goto put;
-		}
-
-		npinfo->rx_flags = 0;
-		INIT_LIST_HEAD(&npinfo->rx_np);
-
-		spin_lock_init(&npinfo->rx_lock);
-		skb_queue_head_init(&npinfo->arp_tx);
-		skb_queue_head_init(&npinfo->txq);
-		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
-
-		atomic_set(&npinfo->refcnt, 1);
-	} else {
-		npinfo = ndev->npinfo;
-		atomic_inc(&npinfo->refcnt);
-	}
-
-	npinfo->netpoll = np;
-
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
-		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-		       np->name, np->dev_name);
-		err = -ENOTSUPP;
-		goto release;
-	}
-
 	if (!netif_running(ndev)) {
 		unsigned long atmost, atleast;
 
@@ -755,7 +722,7 @@ int netpoll_setup(struct netpoll *np)
 		if (err) {
 			printk(KERN_ERR "%s: failed to open %s\n",
 			       np->name, ndev->name);
-			goto release;
+			goto put;
 		}
 
 		atleast = jiffies + HZ/10;
@@ -792,7 +759,7 @@ int netpoll_setup(struct netpoll *np)
 			printk(KERN_ERR "%s: no IP address for %s, aborting\n",
 			       np->name, np->dev_name);
 			err = -EDESTADDRREQ;
-			goto release;
+			goto put;
 		}
 
 		np->local_ip = in_dev->ifa_list->ifa_local;
@@ -800,6 +767,43 @@ int netpoll_setup(struct netpoll *np)
 		printk(KERN_INFO "%s: local IP %pI4\n", np->name, &np->local_ip);
 	}
 
+	np->dev = ndev;
+
+	/* fill up the skb queue */
+	refill_skbs();
+
+	rtnl_lock();
+	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+	    !ndev->netdev_ops->ndo_poll_controller) {
+		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+		       np->name, np->dev_name);
+		err = -ENOTSUPP;
+		goto unlock;
+	}
+
+	if (!ndev->npinfo) {
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		if (!npinfo) {
+			err = -ENOMEM;
+			goto unlock;
+		}
+
+		npinfo->rx_flags = 0;
+		INIT_LIST_HEAD(&npinfo->rx_np);
+
+		spin_lock_init(&npinfo->rx_lock);
+		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+		atomic_set(&npinfo->refcnt, 1);
+	} else {
+		npinfo = ndev->npinfo;
+		atomic_inc(&npinfo->refcnt);
+	}
+
+	npinfo->netpoll = np;
+
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
@@ -807,24 +811,14 @@ int netpoll_setup(struct netpoll *np)
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
-	/* fill up the skb queue */
-	refill_skbs();
-
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
+	rtnl_unlock();
 
 	return 0;
 
-release:
-	if (!ndev->npinfo) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_for_each_entry_safe(npe, tmp, &npinfo->rx_np, rx) {
-			npe->dev = NULL;
-		}
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-
-		kfree(npinfo);
-	}
+unlock:
+	rtnl_unlock();
 put:
 	dev_put(ndev);
 	return err;
@@ -841,43 +835,50 @@ void netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
 	unsigned long flags;
+	int free = 0;
 
-	if (np->dev) {
-		npinfo = np->dev->npinfo;
-		if (npinfo) {
-			if (!list_empty(&npinfo->rx_np)) {
-				spin_lock_irqsave(&npinfo->rx_lock, flags);
-				list_del(&np->rx);
-				if (list_empty(&npinfo->rx_np))
-					npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-				spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-			}
+	if (!np->dev)
+		return;
 
-			if (atomic_dec_and_test(&npinfo->refcnt)) {
-				const struct net_device_ops *ops;
+	rtnl_lock();
+	npinfo = np->dev->npinfo;
+	if (npinfo) {
+		if (!list_empty(&npinfo->rx_np)) {
+			spin_lock_irqsave(&npinfo->rx_lock, flags);
+			list_del(&np->rx);
+			if (list_empty(&npinfo->rx_np))
+				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+		}
 
-				ops = np->dev->netdev_ops;
-				if (ops->ndo_netpoll_cleanup)
-					ops->ndo_netpoll_cleanup(np->dev);
+		free = atomic_dec_and_test(&npinfo->refcnt);
+		if (free) {
+			const struct net_device_ops *ops;
 
-				rcu_assign_pointer(np->dev->npinfo, NULL);
+			ops = np->dev->netdev_ops;
+			if (ops->ndo_netpoll_cleanup)
+				ops->ndo_netpoll_cleanup(np->dev);
 
-				/* avoid racing with NAPI reading npinfo */
-				synchronize_rcu_bh();
+			rcu_assign_pointer(np->dev->npinfo, NULL);
+		}
+	}
+	rtnl_unlock();
 
-				skb_queue_purge(&npinfo->arp_tx);
-				skb_queue_purge(&npinfo->txq);
-				cancel_rearming_delayed_work(&npinfo->tx_work);
+	if (free) {
+		/* avoid racing with NAPI reading npinfo */
+		synchronize_rcu_bh();
 
-				/* clean after last, unfinished work */
-				__skb_queue_purge(&npinfo->txq);
-				kfree(npinfo);
-			}
-		}
+		skb_queue_purge(&npinfo->arp_tx);
+		skb_queue_purge(&npinfo->txq);
+		cancel_rearming_delayed_work(&npinfo->tx_work);
 
-		dev_put(np->dev);
+		/* clean after last, unfinished work */
+		__skb_queue_purge(&npinfo->txq);
+		kfree(npinfo);
 	}
 
+	dev_put(np->dev);
+
 	np->dev = NULL;
 }
 
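Continuing the hedged userspace model from the commit message above (same illustrative struct dev, struct npinfo, and rtnl mutex, not kernel API), the cleanup side shows the other pattern in this hunk: decide whether this was the last reference and unpublish the pointer while holding the lock, but defer the blocking teardown, which in the kernel is synchronize_rcu_bh() plus the queue purges, until after the lock is dropped.

static void cleanup(struct dev *d)
{
	struct npinfo *info;
	int free_it = 0;

	pthread_mutex_lock(&rtnl);
	info = d->npinfo;
	if (info) {
		/* cf. free = atomic_dec_and_test(&npinfo->refcnt); */
		free_it = (--info->refcnt == 0);
		if (free_it)
			d->npinfo = NULL;	/* unpublish while still serialized */
	}
	pthread_mutex_unlock(&rtnl);

	if (free_it) {
		/* blocking teardown runs here, outside the lock; the kernel
		 * waits out an RCU grace period and purges the queues at
		 * this point before freeing */
		free(info);
	}
}

Keeping synchronize_rcu_bh() outside RTNL matters because it can sleep for a full grace period; holding a global lock across it would stall every other RTNL user, which is why the patch records the decision in a local flag and frees after rtnl_unlock().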