aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorEric W. Biederman <ebiederm@xmission.com>2014-03-14 23:50:58 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-17 15:48:12 -0400
commite1bd4d3d7dd2a4a0e731ffe07c439927c23f16ea (patch)
tree1ea21340270dcd91feda2d8ed397dffa3d22a5ae
parent18b37535f861b7eb053040b0b9502331a781c782 (diff)
netpoll: Move all receive processing under CONFIG_NETPOLL_TRAP
Make rx_skb_hook, and rx in struct netpoll depend on CONFIG_NETPOLL_TRAP. Make rx_lock, rx_np, and neigh_tx in struct netpoll_info depend on CONFIG_NETPOLL_TRAP. Make the functions netpoll_rx_on, netpoll_rx, and netpoll_receive_skb no-ops when CONFIG_NETPOLL_TRAP is not set. Only build netpoll_neigh_reply, checksum_udp, service_neigh_queue, pkt_is_ns, and __netpoll_rx when CONFIG_NETPOLL_TRAP is defined. Add helper functions netpoll_trap_setup, netpoll_trap_setup_info, netpoll_trap_cleanup, and netpoll_trap_cleanup_info that initialize and clean up the struct netpoll and struct netpoll_info receive-specific fields when CONFIG_NETPOLL_TRAP is enabled and do nothing otherwise.
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/netpoll.h73
-rw-r--r--net/core/netpoll.c81
2 files changed, 104 insertions(+), 50 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index ab9aaaff8d04..a0632af88d8b 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -24,32 +24,38 @@ struct netpoll {
24 struct net_device *dev; 24 struct net_device *dev;
25 char dev_name[IFNAMSIZ]; 25 char dev_name[IFNAMSIZ];
26 const char *name; 26 const char *name;
27 void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
28 int offset, int len);
29 27
30 union inet_addr local_ip, remote_ip; 28 union inet_addr local_ip, remote_ip;
31 bool ipv6; 29 bool ipv6;
32 u16 local_port, remote_port; 30 u16 local_port, remote_port;
33 u8 remote_mac[ETH_ALEN]; 31 u8 remote_mac[ETH_ALEN];
34 32
35 struct list_head rx; /* rx_np list element */
36 struct work_struct cleanup_work; 33 struct work_struct cleanup_work;
34
35#ifdef CONFIG_NETPOLL_TRAP
36 void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
37 int offset, int len);
38 struct list_head rx; /* rx_np list element */
39#endif
37}; 40};
38 41
39struct netpoll_info { 42struct netpoll_info {
40 atomic_t refcnt; 43 atomic_t refcnt;
41 44
42 spinlock_t rx_lock;
43 struct semaphore dev_lock; 45 struct semaphore dev_lock;
44 struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
45 46
46 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
47 struct sk_buff_head txq; 47 struct sk_buff_head txq;
48 48
49 struct delayed_work tx_work; 49 struct delayed_work tx_work;
50 50
51 struct netpoll *netpoll; 51 struct netpoll *netpoll;
52 struct rcu_head rcu; 52 struct rcu_head rcu;
53
54#ifdef CONFIG_NETPOLL_TRAP
55 spinlock_t rx_lock;
56 struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
57 struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
58#endif
53}; 59};
54 60
55#ifdef CONFIG_NETPOLL 61#ifdef CONFIG_NETPOLL
@@ -68,7 +74,6 @@ int netpoll_setup(struct netpoll *np);
68void __netpoll_cleanup(struct netpoll *np); 74void __netpoll_cleanup(struct netpoll *np);
69void __netpoll_free_async(struct netpoll *np); 75void __netpoll_free_async(struct netpoll *np);
70void netpoll_cleanup(struct netpoll *np); 76void netpoll_cleanup(struct netpoll *np);
71int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
72void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, 77void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
73 struct net_device *dev); 78 struct net_device *dev);
74static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) 79static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
@@ -82,25 +87,12 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
82#ifdef CONFIG_NETPOLL_TRAP 87#ifdef CONFIG_NETPOLL_TRAP
83int netpoll_trap(void); 88int netpoll_trap(void);
84void netpoll_set_trap(int trap); 89void netpoll_set_trap(int trap);
90int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
85static inline bool netpoll_rx_processing(struct netpoll_info *npinfo) 91static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
86{ 92{
87 return !list_empty(&npinfo->rx_np); 93 return !list_empty(&npinfo->rx_np);
88} 94}
89#else
90static inline int netpoll_trap(void)
91{
92 return 0;
93}
94static inline void netpoll_set_trap(int trap)
95{
96}
97static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
98{
99 return false;
100}
101#endif
102 95
103#ifdef CONFIG_NETPOLL
104static inline bool netpoll_rx_on(struct sk_buff *skb) 96static inline bool netpoll_rx_on(struct sk_buff *skb)
105{ 97{
106 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); 98 struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
@@ -138,6 +130,33 @@ static inline int netpoll_receive_skb(struct sk_buff *skb)
138 return 0; 130 return 0;
139} 131}
140 132
133#else
134static inline int netpoll_trap(void)
135{
136 return 0;
137}
138static inline void netpoll_set_trap(int trap)
139{
140}
141static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
142{
143 return false;
144}
145static inline bool netpoll_rx(struct sk_buff *skb)
146{
147 return false;
148}
149static inline bool netpoll_rx_on(struct sk_buff *skb)
150{
151 return false;
152}
153static inline int netpoll_receive_skb(struct sk_buff *skb)
154{
155 return 0;
156}
157#endif
158
159#ifdef CONFIG_NETPOLL
141static inline void *netpoll_poll_lock(struct napi_struct *napi) 160static inline void *netpoll_poll_lock(struct napi_struct *napi)
142{ 161{
143 struct net_device *dev = napi->dev; 162 struct net_device *dev = napi->dev;
@@ -166,18 +185,6 @@ static inline bool netpoll_tx_running(struct net_device *dev)
166} 185}
167 186
168#else 187#else
169static inline bool netpoll_rx(struct sk_buff *skb)
170{
171 return false;
172}
173static inline bool netpoll_rx_on(struct sk_buff *skb)
174{
175 return false;
176}
177static inline int netpoll_receive_skb(struct sk_buff *skb)
178{
179 return 0;
180}
181static inline void *netpoll_poll_lock(struct napi_struct *napi) 188static inline void *netpoll_poll_lock(struct napi_struct *napi)
182{ 189{
183 return NULL; 190 return NULL;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index b69bb3f1ba3f..eed8b1d2d302 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -48,6 +48,7 @@ static struct sk_buff_head skb_pool;
48 48
49#ifdef CONFIG_NETPOLL_TRAP 49#ifdef CONFIG_NETPOLL_TRAP
50static atomic_t trapped; 50static atomic_t trapped;
51static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
51#endif 52#endif
52 53
53DEFINE_STATIC_SRCU(netpoll_srcu); 54DEFINE_STATIC_SRCU(netpoll_srcu);
@@ -61,7 +62,6 @@ DEFINE_STATIC_SRCU(netpoll_srcu);
61 MAX_UDP_CHUNK) 62 MAX_UDP_CHUNK)
62 63
63static void zap_completion_queue(void); 64static void zap_completion_queue(void);
64static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo);
65static void netpoll_async_cleanup(struct work_struct *work); 65static void netpoll_async_cleanup(struct work_struct *work);
66 66
67static unsigned int carrier_timeout = 4; 67static unsigned int carrier_timeout = 4;
@@ -109,6 +109,7 @@ static void queue_process(struct work_struct *work)
109 } 109 }
110} 110}
111 111
112#ifdef CONFIG_NETPOLL_TRAP
112static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh, 113static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
113 unsigned short ulen, __be32 saddr, __be32 daddr) 114 unsigned short ulen, __be32 saddr, __be32 daddr)
114{ 115{
@@ -127,6 +128,7 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
127 128
128 return __skb_checksum_complete(skb); 129 return __skb_checksum_complete(skb);
129} 130}
131#endif /* CONFIG_NETPOLL_TRAP */
130 132
131/* 133/*
132 * Check whether delayed processing was scheduled for our NIC. If so, 134 * Check whether delayed processing was scheduled for our NIC. If so,
@@ -179,6 +181,7 @@ static void poll_napi(struct net_device *dev, int budget)
179 } 181 }
180} 182}
181 183
184#ifdef CONFIG_NETPOLL_TRAP
182static void service_neigh_queue(struct net_device *dev, 185static void service_neigh_queue(struct net_device *dev,
183 struct netpoll_info *npi) 186 struct netpoll_info *npi)
184{ 187{
@@ -197,6 +200,12 @@ static void service_neigh_queue(struct net_device *dev,
197 while ((skb = skb_dequeue(&npi->neigh_tx))) 200 while ((skb = skb_dequeue(&npi->neigh_tx)))
198 netpoll_neigh_reply(skb, npi); 201 netpoll_neigh_reply(skb, npi);
199} 202}
203#else /* !CONFIG_NETPOLL_TRAP */
204static inline void service_neigh_queue(struct net_device *dev,
205 struct netpoll_info *npi)
206{
207}
208#endif /* CONFIG_NETPOLL_TRAP */
200 209
201static void netpoll_poll_dev(struct net_device *dev) 210static void netpoll_poll_dev(struct net_device *dev)
202{ 211{
@@ -522,6 +531,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
522} 531}
523EXPORT_SYMBOL(netpoll_send_udp); 532EXPORT_SYMBOL(netpoll_send_udp);
524 533
534#ifdef CONFIG_NETPOLL_TRAP
525static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo) 535static void netpoll_neigh_reply(struct sk_buff *skb, struct netpoll_info *npinfo)
526{ 536{
527 int size, type = ARPOP_REPLY; 537 int size, type = ARPOP_REPLY;
@@ -900,6 +910,55 @@ out:
900 return 0; 910 return 0;
901} 911}
902 912
913static void netpoll_trap_setup_info(struct netpoll_info *npinfo)
914{
915 INIT_LIST_HEAD(&npinfo->rx_np);
916 spin_lock_init(&npinfo->rx_lock);
917 skb_queue_head_init(&npinfo->neigh_tx);
918}
919
920static void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
921{
922 skb_queue_purge(&npinfo->neigh_tx);
923}
924
925static void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
926{
927 unsigned long flags;
928 if (np->rx_skb_hook) {
929 spin_lock_irqsave(&npinfo->rx_lock, flags);
930 list_add_tail(&np->rx, &npinfo->rx_np);
931 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
932 }
933}
934
935static void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
936{
937 unsigned long flags;
938 if (!list_empty(&npinfo->rx_np)) {
939 spin_lock_irqsave(&npinfo->rx_lock, flags);
940 list_del(&np->rx);
941 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
942 }
943}
944
945#else /* !CONFIG_NETPOLL_TRAP */
946static inline void netpoll_trap_setup_info(struct netpoll_info *npinfo)
947{
948}
949static inline void netpoll_trap_cleanup_info(struct netpoll_info *npinfo)
950{
951}
952static inline
953void netpoll_trap_setup(struct netpoll *np, struct netpoll_info *npinfo)
954{
955}
956static inline
957void netpoll_trap_cleanup(struct netpoll *np, struct netpoll_info *npinfo)
958{
959}
960#endif /* CONFIG_NETPOLL_TRAP */
961
903void netpoll_print_options(struct netpoll *np) 962void netpoll_print_options(struct netpoll *np)
904{ 963{
905 np_info(np, "local port %d\n", np->local_port); 964 np_info(np, "local port %d\n", np->local_port);
@@ -1023,7 +1082,6 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
1023{ 1082{
1024 struct netpoll_info *npinfo; 1083 struct netpoll_info *npinfo;
1025 const struct net_device_ops *ops; 1084 const struct net_device_ops *ops;
1026 unsigned long flags;
1027 int err; 1085 int err;
1028 1086
1029 np->dev = ndev; 1087 np->dev = ndev;
@@ -1045,11 +1103,9 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
1045 goto out; 1103 goto out;
1046 } 1104 }
1047 1105
1048 INIT_LIST_HEAD(&npinfo->rx_np); 1106 netpoll_trap_setup_info(npinfo);
1049 1107
1050 spin_lock_init(&npinfo->rx_lock);
1051 sema_init(&npinfo->dev_lock, 1); 1108 sema_init(&npinfo->dev_lock, 1);
1052 skb_queue_head_init(&npinfo->neigh_tx);
1053 skb_queue_head_init(&npinfo->txq); 1109 skb_queue_head_init(&npinfo->txq);
1054 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process); 1110 INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
1055 1111
@@ -1068,11 +1124,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp)
1068 1124
1069 npinfo->netpoll = np; 1125 npinfo->netpoll = np;
1070 1126
1071 if (np->rx_skb_hook) { 1127 netpoll_trap_setup(np, npinfo);
1072 spin_lock_irqsave(&npinfo->rx_lock, flags);
1073 list_add_tail(&np->rx, &npinfo->rx_np);
1074 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
1075 }
1076 1128
1077 /* last thing to do is link it to the net device structure */ 1129 /* last thing to do is link it to the net device structure */
1078 rcu_assign_pointer(ndev->npinfo, npinfo); 1130 rcu_assign_pointer(ndev->npinfo, npinfo);
@@ -1222,7 +1274,7 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
1222 struct netpoll_info *npinfo = 1274 struct netpoll_info *npinfo =
1223 container_of(rcu_head, struct netpoll_info, rcu); 1275 container_of(rcu_head, struct netpoll_info, rcu);
1224 1276
1225 skb_queue_purge(&npinfo->neigh_tx); 1277 netpoll_trap_cleanup_info(npinfo);
1226 skb_queue_purge(&npinfo->txq); 1278 skb_queue_purge(&npinfo->txq);
1227 1279
1228 /* we can't call cancel_delayed_work_sync here, as we are in softirq */ 1280 /* we can't call cancel_delayed_work_sync here, as we are in softirq */
@@ -1238,7 +1290,6 @@ static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head)
1238void __netpoll_cleanup(struct netpoll *np) 1290void __netpoll_cleanup(struct netpoll *np)
1239{ 1291{
1240 struct netpoll_info *npinfo; 1292 struct netpoll_info *npinfo;
1241 unsigned long flags;
1242 1293
1243 /* rtnl_dereference would be preferable here but 1294 /* rtnl_dereference would be preferable here but
1244 * rcu_cleanup_netpoll path can put us in here safely without 1295 * rcu_cleanup_netpoll path can put us in here safely without
@@ -1248,11 +1299,7 @@ void __netpoll_cleanup(struct netpoll *np)
1248 if (!npinfo) 1299 if (!npinfo)
1249 return; 1300 return;
1250 1301
1251 if (!list_empty(&npinfo->rx_np)) { 1302 netpoll_trap_cleanup(np, npinfo);
1252 spin_lock_irqsave(&npinfo->rx_lock, flags);
1253 list_del(&np->rx);
1254 spin_unlock_irqrestore(&npinfo->rx_lock, flags);
1255 }
1256 1303
1257 synchronize_srcu(&netpoll_srcu); 1304 synchronize_srcu(&netpoll_srcu);
1258 1305