author     Herbert Xu <herbert@gondor.apana.org.au>  2010-06-10 12:12:48 -0400
committer  David S. Miller <davem@davemloft.net>     2010-06-15 13:58:40 -0400
commit     8fdd95ec162a8fbac7f41d6f54f90402fe3e8cb1
tree       e326d2640054c986fd5458719a45c4ef630953a8
parent     4247e161b12f8dffb7ee3ee07bc5e61f714ebe2d
netpoll: Allow netpoll_setup/cleanup recursion
This patch adds the functions __netpoll_setup/__netpoll_cleanup
which are designed to be called recursively through ndo_netpoll_setup.
They must be called with RTNL held, and the caller must initialise
np->dev and ensure that it has a valid reference count.
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
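
For illustration only: a stacked driver's ndo_netpoll_setup could recurse
into __netpoll_setup() along the lines of the sketch below, written against
the contract stated above (RTNL already held, np->dev initialised, device
reference taken). The foo_priv structure, its lower_dev and np fields, and
the function name are hypothetical, not part of this patch.

/* Hypothetical sketch, not from this patch: a master device propagating
 * netpoll down to one lower device through the new recursion entry point.
 */
static int foo_ndo_netpoll_setup(struct net_device *dev,
				 struct netpoll_info *npinfo)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical */
	struct net_device *lower = priv->lower_dev;	/* hypothetical */
	struct netpoll *np;
	int err;

	np = kzalloc(sizeof(*np), GFP_KERNEL);	/* RTNL held, may sleep */
	if (!np)
		return -ENOMEM;

	np->dev = lower;
	dev_hold(lower);	/* __netpoll_setup() expects a valid reference */

	err = __netpoll_setup(np);	/* recurse into the netpoll core */
	if (err) {
		dev_put(lower);
		kfree(np);
		return err;
	}

	priv->np = np;	/* hypothetical bookkeeping for cleanup */
	return 0;
}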
-rw-r--r--  include/linux/netpoll.h |   2
-rw-r--r--  net/core/netpoll.c      | 176
2 files changed, 99 insertions(+), 79 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index 95c9f7e16776..f3ad74af7e1f 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -46,9 +46,11 @@ void netpoll_poll(struct netpoll *np);
 void netpoll_send_udp(struct netpoll *np, const char *msg, int len);
 void netpoll_print_options(struct netpoll *np);
 int netpoll_parse_options(struct netpoll *np, char *opt);
+int __netpoll_setup(struct netpoll *np);
 int netpoll_setup(struct netpoll *np);
 int netpoll_trap(void);
 void netpoll_set_trap(int trap);
+void __netpoll_cleanup(struct netpoll *np);
 void netpoll_cleanup(struct netpoll *np);
 int __netpoll_rx(struct sk_buff *skb);
 void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 7de6dcad5d79..560297ee55b4 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -693,15 +693,78 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
 	return -1;
 }
 
-int netpoll_setup(struct netpoll *np)
+int __netpoll_setup(struct netpoll *np)
 {
-	struct net_device *ndev = NULL;
-	struct in_device *in_dev;
+	struct net_device *ndev = np->dev;
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
 	unsigned long flags;
 	int err;
 
+	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
+	    !ndev->netdev_ops->ndo_poll_controller) {
+		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
+		       np->name, np->dev_name);
+		err = -ENOTSUPP;
+		goto out;
+	}
+
+	if (!ndev->npinfo) {
+		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
+		if (!npinfo) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		npinfo->rx_flags = 0;
+		INIT_LIST_HEAD(&npinfo->rx_np);
+
+		spin_lock_init(&npinfo->rx_lock);
+		skb_queue_head_init(&npinfo->arp_tx);
+		skb_queue_head_init(&npinfo->txq);
+		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
+
+		atomic_set(&npinfo->refcnt, 1);
+
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_setup) {
+			err = ops->ndo_netpoll_setup(ndev, npinfo);
+			if (err)
+				goto free_npinfo;
+		}
+	} else {
+		npinfo = ndev->npinfo;
+		atomic_inc(&npinfo->refcnt);
+	}
+
+	npinfo->netpoll = np;
+
+	if (np->rx_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
+		list_add_tail(&np->rx, &npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+
+	/* last thing to do is link it to the net device structure */
+	rcu_assign_pointer(ndev->npinfo, npinfo);
+	rtnl_unlock();
+
+	return 0;
+
+free_npinfo:
+	kfree(npinfo);
+out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(__netpoll_setup);
+
+int netpoll_setup(struct netpoll *np)
+{
+	struct net_device *ndev = NULL;
+	struct in_device *in_dev;
+	int err;
+
 	if (np->dev_name)
 		ndev = dev_get_by_name(&init_net, np->dev_name);
 	if (!ndev) {
@@ -774,61 +837,14 @@ int netpoll_setup(struct netpoll *np)
 	refill_skbs();
 
 	rtnl_lock();
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
-		printk(KERN_ERR "%s: %s doesn't support polling, aborting.\n",
-		       np->name, np->dev_name);
-		err = -ENOTSUPP;
-		goto unlock;
-	}
-
-	if (!ndev->npinfo) {
-		npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL);
-		if (!npinfo) {
-			err = -ENOMEM;
-			goto unlock;
-		}
-
-		npinfo->rx_flags = 0;
-		INIT_LIST_HEAD(&npinfo->rx_np);
-
-		spin_lock_init(&npinfo->rx_lock);
-		skb_queue_head_init(&npinfo->arp_tx);
-		skb_queue_head_init(&npinfo->txq);
-		INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
-
-		atomic_set(&npinfo->refcnt, 1);
-
-		ops = np->dev->netdev_ops;
-		if (ops->ndo_netpoll_setup) {
-			err = ops->ndo_netpoll_setup(ndev, npinfo);
-			if (err)
-				goto free_npinfo;
-		}
-	} else {
-		npinfo = ndev->npinfo;
-		atomic_inc(&npinfo->refcnt);
-	}
-
-	npinfo->netpoll = np;
-
-	if (np->rx_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
-
-	/* last thing to do is link it to the net device structure */
-	rcu_assign_pointer(ndev->npinfo, npinfo);
+	err = __netpoll_setup(np);
 	rtnl_unlock();
 
+	if (err)
+		goto put;
+
 	return 0;
 
-free_npinfo:
-	kfree(npinfo);
-unlock:
-	rtnl_unlock();
 put:
 	dev_put(ndev);
 	return err;
@@ -841,40 +857,32 @@ static int __init netpoll_init(void)
 }
 core_initcall(netpoll_init);
 
-void netpoll_cleanup(struct netpoll *np)
+void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
 	unsigned long flags;
-	int free = 0;
 
-	if (!np->dev)
+	npinfo = np->dev->npinfo;
+	if (!npinfo)
 		return;
 
-	rtnl_lock();
-	npinfo = np->dev->npinfo;
-	if (npinfo) {
-		if (!list_empty(&npinfo->rx_np)) {
-			spin_lock_irqsave(&npinfo->rx_lock, flags);
-			list_del(&np->rx);
-			if (list_empty(&npinfo->rx_np))
-				npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
-			spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-		}
+	if (!list_empty(&npinfo->rx_np)) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_del(&np->rx);
+		if (list_empty(&npinfo->rx_np))
+			npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
 
-		free = atomic_dec_and_test(&npinfo->refcnt);
-		if (free) {
-			const struct net_device_ops *ops;
+	if (atomic_dec_and_test(&npinfo->refcnt)) {
+		const struct net_device_ops *ops;
 
-			ops = np->dev->netdev_ops;
-			if (ops->ndo_netpoll_cleanup)
-				ops->ndo_netpoll_cleanup(np->dev);
+		ops = np->dev->netdev_ops;
+		if (ops->ndo_netpoll_cleanup)
+			ops->ndo_netpoll_cleanup(np->dev);
 
-			rcu_assign_pointer(np->dev->npinfo, NULL);
-		}
-	}
-	rtnl_unlock();
+		rcu_assign_pointer(np->dev->npinfo, NULL);
 
-	if (free) {
 		/* avoid racing with NAPI reading npinfo */
 		synchronize_rcu_bh();
 
@@ -886,9 +894,19 @@ void netpoll_cleanup(struct netpoll *np)
 		__skb_queue_purge(&npinfo->txq);
 		kfree(npinfo);
 	}
+}
+EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
-	dev_put(np->dev);
+void netpoll_cleanup(struct netpoll *np)
+{
+	if (!np->dev)
+		return;
 
+	rtnl_lock();
+	__netpoll_cleanup(np);
+	rtnl_unlock();
+
+	dev_put(np->dev);
 	np->dev = NULL;
 }
 
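
For symmetry, a matching teardown through __netpoll_cleanup() might look
like the sketch below, reusing the hypothetical foo_priv bookkeeping from
the setup sketch above; as with setup, RTNL is assumed to be held by
whoever invokes the ndo.

/* Hypothetical counterpart to the setup sketch above, not from this patch. */
static void foo_ndo_netpoll_cleanup(struct net_device *dev)
{
	struct foo_priv *priv = netdev_priv(dev);	/* hypothetical */
	struct netpoll *np = priv->np;

	if (!np)
		return;

	priv->np = NULL;
	__netpoll_cleanup(np);	/* RTNL already held by the caller */
	dev_put(np->dev);	/* drop the reference taken at setup time */
	kfree(np);
}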