author    Eric W. Biederman <ebiederm@xmission.com>  2014-03-14 23:51:52 -0400
committer David S. Miller <davem@davemloft.net>      2014-03-17 15:48:12 -0400
commit    9c62a68d13119a1ca9718381d97b0cb415ff4e9d (patch)
tree      aae7c0ffaa451dc44afbbba9c37780e9c12bd1fc /include/linux
parent    e1bd4d3d7dd2a4a0e731ffe07c439927c23f16ea (diff)
netpoll: Remove dead packet receive code (CONFIG_NETPOLL_TRAP)
The netpoll packet receive code only becomes active if the netpoll rx_skb_hook is implemented, and there is not a single implementation of the netpoll rx_skb_hook in the kernel. All of the out-of-tree implementations I have found call netpoll_poll, which was removed from the kernel in 2011, so this change should not add any additional breakage.

There are problems with the netpoll packet receive code:
- __netpoll_rx does not call dev_kfree_skb_irq or dev_kfree_skb_any in hard irq context.
- netpoll_neigh_reply leaks every skb it receives.
- Reception of packets does not work successfully on stacked devices (aka bonding, team, bridge, and vlans).

Given that the netpoll packet receive code is buggy, that there are no out-of-tree users that will be merged soon, and that the code has not been used in tree for a decade, let's just remove it. Reverting this commit can serve as a starting point for anyone who wants to resurrect netpoll packet reception support.

Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
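For reference, and not part of this patch: a minimal sketch of the freeing rule the message refers to. In hard irq context an skb must be released with dev_kfree_skb_irq(), which defers the actual free to the NET_TX softirq; dev_kfree_skb_any() selects the right variant for the current context. The helper name below is illustrative only.

#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative helper: dispose of an skb safely from any context.
 * This mirrors what dev_kfree_skb_any() does internally, and it is
 * exactly the step the netpoll receive paths were missing. */
static void drop_skb_example(struct sk_buff *skb)
{
	if (in_irq() || irqs_disabled())
		dev_kfree_skb_irq(skb);	/* defer the free to softirq */
	else
		dev_kfree_skb(skb);	/* process/softirq context */
}

Callers that cannot know their context can simply call dev_kfree_skb_any(skb) directly.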
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/netdevice.h   17
-rw-r--r--  include/linux/netpoll.h     84
2 files changed, 0 insertions, 101 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b8d8c805fd75..4b6d12c7b803 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1979,9 +1979,6 @@ struct net_device *__dev_get_by_index(struct net *net, int ifindex);
 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
 int netdev_get_name(struct net *net, char *name, int ifindex);
 int dev_restart(struct net_device *dev);
-#ifdef CONFIG_NETPOLL_TRAP
-int netpoll_trap(void);
-#endif
 int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
 
 static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
@@ -2186,12 +2183,6 @@ static inline void netif_tx_start_all_queues(struct net_device *dev)
 
 static inline void netif_tx_wake_queue(struct netdev_queue *dev_queue)
 {
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap()) {
-		netif_tx_start_queue(dev_queue);
-		return;
-	}
-#endif
 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state))
 		__netif_schedule(dev_queue->qdisc);
 }
@@ -2435,10 +2426,6 @@ static inline void netif_start_subqueue(struct net_device *dev, u16 queue_index)
 static inline void netif_stop_subqueue(struct net_device *dev, u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
-		return;
-#endif
 	netif_tx_stop_queue(txq);
 }
 
@@ -2473,10 +2460,6 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
 static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
 {
 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-#ifdef CONFIG_NETPOLL_TRAP
-	if (netpoll_trap())
-		return;
-#endif
 	if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state))
 		__netif_schedule(txq->qdisc);
 }
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index a0632af88d8b..1b475a5a7239 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -31,12 +31,6 @@ struct netpoll {
 	u8 remote_mac[ETH_ALEN];
 
 	struct work_struct cleanup_work;
-
-#ifdef CONFIG_NETPOLL_TRAP
-	void (*rx_skb_hook)(struct netpoll *np, int source, struct sk_buff *skb,
-			    int offset, int len);
-	struct list_head rx; /* rx_np list element */
-#endif
 };
 
 struct netpoll_info {
@@ -50,12 +44,6 @@ struct netpoll_info {
 
 	struct netpoll *netpoll;
 	struct rcu_head rcu;
-
-#ifdef CONFIG_NETPOLL_TRAP
-	spinlock_t rx_lock;
-	struct list_head rx_np; /* netpolls that registered an rx_skb_hook */
-	struct sk_buff_head neigh_tx; /* list of neigh requests to reply to */
-#endif
 };
 
 #ifdef CONFIG_NETPOLL
@@ -84,78 +72,6 @@ static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_NETPOLL_TRAP
-int netpoll_trap(void);
-void netpoll_set_trap(int trap);
-int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo);
-static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
-{
-	return !list_empty(&npinfo->rx_np);
-}
-
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-	struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo);
-
-	return npinfo && netpoll_rx_processing(npinfo);
-}
-
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-	struct netpoll_info *npinfo;
-	unsigned long flags;
-	bool ret = false;
-
-	local_irq_save(flags);
-
-	if (!netpoll_rx_on(skb))
-		goto out;
-
-	npinfo = rcu_dereference_bh(skb->dev->npinfo);
-	spin_lock(&npinfo->rx_lock);
-	/* check rx_processing again with the lock held */
-	if (netpoll_rx_processing(npinfo) && __netpoll_rx(skb, npinfo))
-		ret = true;
-	spin_unlock(&npinfo->rx_lock);
-
-out:
-	local_irq_restore(flags);
-	return ret;
-}
-
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-	if (!list_empty(&skb->dev->napi_list))
-		return netpoll_rx(skb);
-	return 0;
-}
-
-#else
-static inline int netpoll_trap(void)
-{
-	return 0;
-}
-static inline void netpoll_set_trap(int trap)
-{
-}
-static inline bool netpoll_rx_processing(struct netpoll_info *npinfo)
-{
-	return false;
-}
-static inline bool netpoll_rx(struct sk_buff *skb)
-{
-	return false;
-}
-static inline bool netpoll_rx_on(struct sk_buff *skb)
-{
-	return false;
-}
-static inline int netpoll_receive_skb(struct sk_buff *skb)
-{
-	return 0;
-}
-#endif
-
 #ifdef CONFIG_NETPOLL
 static inline void *netpoll_poll_lock(struct napi_struct *napi)
 {
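For anyone reverting this commit to resurrect reception support, the deleted rx_skb_hook field above fixes the shape of the callback. A hypothetical out-of-tree receiver might have looked roughly like the sketch below; the function name is invented, and reading 'source' as the sender's UDP port and offset/len as the bounds of the UDP payload in skb->data is an assumption, not something this patch documents.

/* Hypothetical, out-of-tree: a receiver matching the removed hook. */
static void example_rx_skb_hook(struct netpoll *np, int source,
				struct sk_buff *skb, int offset, int len)
{
	/* Assumed: payload starts at skb->data + offset, len bytes long. */
	pr_info("netpoll rx: %d byte payload from port %d\n", len, source);
}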