author		Eric W. Biederman <ebiederm@xmission.com>	2014-03-14 23:50:58 -0400
committer	David S. Miller <davem@davemloft.net>		2014-03-17 15:48:12 -0400
commit		e1bd4d3d7dd2a4a0e731ffe07c439927c23f16ea (patch)
tree		1ea21340270dcd91feda2d8ed397dffa3d22a5ae /net/core
parent		18b37535f861b7eb053040b0b9502331a781c782 (diff)
netpoll: Move all receive processing under CONFIG_NETPOLL_TRAP
Make rx_skb_hook and rx in struct netpoll depend on CONFIG_NETPOLL_TRAP.
Make rx_lock, rx_np, and neigh_tx in struct netpoll_info depend on
CONFIG_NETPOLL_TRAP.
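(The field declarations themselves live in include/linux/netpoll.h, which is
outside the net/core diffstat shown below. As a rough, illustrative sketch
only -- field types inferred from how net/core/netpoll.c uses them, and the
rx_skb_hook signature assumed rather than taken from this patch -- the gating
looks roughly like:

struct netpoll {
	/* ... unrelated members omitted ... */
#ifdef CONFIG_NETPOLL_TRAP
	void (*rx_skb_hook)(struct netpoll *np, int source,
			    struct sk_buff *skb, int offset, int len);
	struct list_head rx;		/* entry on npinfo->rx_np */
#endif
};

struct netpoll_info {
	/* ... unrelated members omitted ... */
#ifdef CONFIG_NETPOLL_TRAP
	spinlock_t rx_lock;		/* protects rx_np */
	struct list_head rx_np;		/* netpolls with an rx_skb_hook */
	struct sk_buff_head neigh_tx;	/* deferred neighbour replies */
#endif
};
)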
Make the functions netpoll_rx_on, netpoll_rx, and netpoll_receive_skb
no-ops when CONFIG_NETPOLL_TRAP is not set.
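(Those stubs also sit in the header rather than in this net/core diff. As a
sketch, with return types and signatures approximated from the existing
callers rather than quoted from this patch, the no-op variants reduce to
trivial inlines so the core receive path compiles away cleanly:

#ifdef CONFIG_NETPOLL_TRAP
bool netpoll_rx_on(struct sk_buff *skb);
bool netpoll_rx(struct sk_buff *skb);
int netpoll_receive_skb(struct sk_buff *skb);
#else
static inline bool netpoll_rx_on(struct sk_buff *skb) { return false; }
static inline bool netpoll_rx(struct sk_buff *skb) { return false; }
static inline int netpoll_receive_skb(struct sk_buff *skb) { return 0; }
#endif
)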
Only build netpoll_neigh_reply, checksum_udp, service_neigh_queue,
pkt_is_ns, and __netpoll_rx when CONFIG_NETPOLL_TRAP is defined.
Add helper functions netpoll_trap_setup, netpoll_trap_setup_info,
netpoll_trap_cleanup, and netpoll_trap_cleanup_info that initialize
and clean up the receive-specific fields of struct netpoll and struct
netpoll_info when CONFIG_NETPOLL_TRAP is enabled, and do nothing
otherwise.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--	net/core/netpoll.c	81
1 file changed, 64 insertions(+), 17 deletions(-)
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b69bb3f1ba3f..eed8b1d2d302 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -48,6 +48,7 @@ static struct sk_buff_head skb_pool;
 
 #ifdef CONFIG_NETPOLL_TRAP
 static atomic_t trapped;
+static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 #endif
 
 DEFINE_STATIC_SRCU(netpoll_srcu);
@@ -61,7 +62,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
 	 MAX_UDP_CHUNK)
 
 static void zap_completion_queue(void);
-static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
 static void netpoll_async_cleanup(struct work_struct *work);
 
 static unsigned int carrier_timeout = 4;
@@ -109,6 +109,7 @@ static void queue_process(struct work_struct *work)
 	}
 }
 
+#ifdef CONFIG_NETPOLL_TRAP
 static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 			    unsigned short ulen, __be32 saddr, __be32 daddr)
 {
@@ -127,6 +128,7 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
 
 	return __skb_checksum_complete(skb);
 }
+#endif /* CONFIG_NETPOLL_TRAP */
 
 /*
  * Check whether delayed processing was scheduled for our NIC. If so,
@@ -179,6 +181,7 @@ static void poll_napi(struct net_device *dev, int budget)
 	}
 }
 
+#ifdef CONFIG_NETPOLL_TRAP
 static void service_neigh_queue(struct net_device *dev,
 				struct netpoll_info *npi)
 {
@@ -197,6 +200,12 @@ static void service_neigh_queue(struct net_device *dev,
 	while ((skb = skb_dequeue(&npi->neigh_tx)))
 		netpoll_neigh_reply(skb, npi);
 }
+#else /* !CONFIG_NETPOLL_TRAP */
+static inline void service_neigh_queue(struct net_device *dev,
+				       struct netpoll_info *npi)
+{
+}
+#endif /* CONFIG_NETPOLL_TRAP */
 
 static void netpoll_poll_dev(struct net_device *dev)
 {
@@ -522,6 +531,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
 }
 EXPORT_SYMBOL(netpoll_send_udp);
 
+#ifdef CONFIG_NETPOLL_TRAP
 static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
 {
 	int size, type = ARPOP_REPLY;
@@ -900,6 +910,55 @@ out:
 	return 0;
 }
 
+static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
+{
+	INIT_LIST_HEAD(&npinfo->rx_np);
+	spin_lock_init(&npinfo->rx_lock);
+	skb_queue_head_init(&npinfo->neigh_tx);
+}
+
+static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
+{
+	skb_queue_purge(&npinfo->neigh_tx);
+}
+
+static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+	unsigned long flags;
+	if (np->rx_skb_hook) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_add_tail(&np->rx, &npinfo->rx_np);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+}
+
+static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+	unsigned long flags;
+	if (!list_empty(&npinfo->rx_np)) {
+		spin_lock_irqsave(&npinfo->rx_lock, flags);
+		list_del(&np->rx);
+		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+	}
+}
+
+#else /* !CONFIG_NETPOLL_TRAP */
+static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
+{
+}
+static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
+{
+}
+static inline
+void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+}
+static inline
+void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
+{
+}
+#endif /* CONFIG_NETPOLL_TRAP */
+
 void netpoll_print_options(struct netpoll *np)
 {
 	np_info(np, "local port %d\n", np->local_port);
@@ -1023,7 +1082,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 {
 	struct netpoll_info *npinfo;
 	const struct net_device_ops *ops;
-	unsigned long flags;
 	int err;
 
 	np->dev = ndev;
@@ -1045,11 +1103,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 		goto out;
 	}
 
-	INIT_LIST_HEAD(&npinfo->rx_np);
+	netpoll_trap_setup_info(npinfo);
 
-	spin_lock_init(&npinfo->rx_lock);
 	sema_init(&npinfo->dev_lock, 1);
-	skb_queue_head_init(&npinfo->neigh_tx);
 	skb_queue_head_init(&npinfo->txq);
 	INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
 
@@ -1068,11 +1124,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
 
 	npinfo->netpoll = np;
 
-	if (np->rx_skb_hook) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_add_tail(&np->rx, &npinfo->rx_np);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
+	netpoll_trap_setup(np, npinfo);
 
 	/* last thing to do is link it to the net device structure */
 	rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -1222,7 +1274,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 	struct netpoll_info *npinfo =
 		container_of(rcu_head, struct netpoll_info, rcu);
 
-	skb_queue_purge(&npinfo->neigh_tx);
+	netpoll_trap_cleanup_info(npinfo);
 	skb_queue_purge(&npinfo->txq);
 
 	/* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -1238,7 +1290,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
 void __netpoll_cleanup(struct netpoll *np)
 {
 	struct netpoll_info *npinfo;
-	unsigned long flags;
 
 	/* rtnl_dereference would be preferable here but
 	 * rcu_cleanup_netpoll path can put us in here safely without
@@ -1248,11 +1299,7 @@ void __netpoll_cleanup(struct netpoll *np)
 	if (!npinfo)
 		return;
 
-	if (!list_empty(&npinfo->rx_np)) {
-		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		list_del(&np->rx);
-		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
-	}
+	netpoll_trap_cleanup(np, npinfo);
 
 	synchronize_srcu(&netpoll_srcu);
 
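For readers less familiar with this pattern, the idiom the patch uses
throughout -- real definitions under #ifdef CONFIG_NETPOLL_TRAP, empty static
inline fallbacks under #else so call sites need no conditional compilation --
can be reduced to a tiny self-contained example. Everything below is
hypothetical demo code, not kernel API:

/* feature_gate_demo.c - minimal illustration of compile-time feature gating
 * with no-op static inline fallbacks, as used in this patch.
 *
 * Build:  cc feature_gate_demo.c                        (feature compiled out)
 *         cc -DCONFIG_FEATURE_TRAP feature_gate_demo.c  (feature enabled)
 */
#include <stdio.h>

struct ctx {
	int common_state;
#ifdef CONFIG_FEATURE_TRAP
	int trap_count;			/* exists only when the feature is built in */
#endif
};

#ifdef CONFIG_FEATURE_TRAP
static void trap_setup(struct ctx *c)  { c->trap_count = 0; }
static void trap_record(struct ctx *c) { c->trap_count++; }
static void trap_report(struct ctx *c) { printf("trapped %d\n", c->trap_count); }
#else
/* no-op fallbacks: the callers below stay identical either way */
static inline void trap_setup(struct ctx *c)  { (void)c; }
static inline void trap_record(struct ctx *c) { (void)c; }
static inline void trap_report(struct ctx *c) { (void)c; }
#endif

int main(void)
{
	struct ctx c = { .common_state = 1 };

	trap_setup(&c);		/* compiles to nothing when the feature is off */
	trap_record(&c);
	trap_report(&c);
	printf("common state %d\n", c.common_state);
	return 0;
}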