Diffstat (limited to 'include/linux/netpoll.h')
-rw-r--r--	include/linux/netpoll.h	48
1 files changed, 37 insertions, 11 deletions
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index c0d8b90c5202..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
 
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>
 
 struct netpoll;
@@ -16,14 +17,20 @@ struct netpoll;
 struct netpoll {
 	struct net_device *dev;
 	char dev_name[16], *name;
-	int rx_flags;
 	void (*rx_hook)(struct netpoll *, int, char *, int);
 	void (*drop)(struct sk_buff *skb);
 	u32 local_ip, remote_ip;
 	u16 local_port, remote_port;
 	unsigned char local_mac[6], remote_mac[6];
+};
+
+struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
+	int rx_flags;
+	spinlock_t rx_lock;
+	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 };
 
 void netpoll_poll(struct netpoll *np);
@@ -39,28 +46,47 @@ void netpoll_queue(struct sk_buff *skb);
 #ifdef CONFIG_NETPOLL
 static inline int netpoll_rx(struct sk_buff *skb)
 {
-	return skb->dev->np && skb->dev->np->rx_flags && __netpoll_rx(skb);
+	struct netpoll_info *npinfo = skb->dev->npinfo;
+	unsigned long flags;
+	int ret = 0;
+
+	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
+		return 0;
+
+	spin_lock_irqsave(&npinfo->rx_lock, flags);
+	/* check rx_flags again with the lock held */
+	if (npinfo->rx_flags && __netpoll_rx(skb))
+		ret = 1;
+	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
+
+	return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
-	if (dev->np) {
-		spin_lock(&dev->np->poll_lock);
-		dev->np->poll_owner = smp_processor_id();
+	rcu_read_lock(); /* deal with race on ->npinfo */
+	if (dev->npinfo) {
+		spin_lock(&dev->npinfo->poll_lock);
+		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->np) {
-		spin_unlock(&dev->np->poll_lock);
-		dev->np->poll_owner = -1;
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif
 
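For context (not part of the patch itself), the sketch below illustrates the calling pattern the new handle-based API expects: netpoll_poll_lock() now returns an opaque cookie (the device's netpoll_info, or NULL when netpoll is not attached) which is handed back to netpoll_poll_unlock(), so the unlock side never has to re-dereference per-device netpoll state that may be torn down concurrently under RCU. The wrapper name poll_one_device() is a hypothetical, illustrative helper, and dev->poll refers to the 2.6-era per-device poll callback; this is a minimal sketch, not code from the patch or from net/core/dev.c.

/*
 * Illustrative sketch only -- not part of this patch.  Assumes a
 * 2.6-era dev->poll() callback; poll_one_device() is a hypothetical
 * name used here for demonstration.
 */
#include <linux/netdevice.h>
#include <linux/netpoll.h>

static void poll_one_device(struct net_device *dev, int *budget)
{
	/* Returns dev->npinfo with poll_lock held (or NULL if netpoll
	 * is not attached) and enters an RCU read-side section. */
	void *have = netpoll_poll_lock(dev);

	dev->poll(dev, budget);		/* drain the device's rx ring */

	/* Drops poll_lock if it was taken and leaves the RCU section;
	 * safe even if dev->npinfo was cleared in the meantime,
	 * because only the saved cookie is dereferenced. */
	netpoll_poll_unlock(have);
}

Passing the cookie instead of the net_device is what lets netpoll_setup()/netpoll_cleanup() swap ->npinfo under RCU without the unlock path racing against that teardown.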