author     Stephen Hemminger <shemminger@linux-foundation.org>   2007-11-19 22:37:09 -0500
committer  David S. Miller <davem@davemloft.net>                 2008-01-28 17:54:18 -0500
commit     c7b6ea24b43afb5749cb704e143df19d70e23dea
tree       e46281225b1cec008b5a65ece47b8d5f1a8d7abd
parent     33f807ba0d9259e7c75c7a2ce8bd2787e5b540c7
[NETPOLL]: Don't need rx_flags.
The rx_flags variable is redundant. Turning rx on/off is done
via setting the rx_np pointer.
Signed-off-by: Stephen Hemminger <shemminger@linux-foundation.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
 include/linux/netpoll.h | 7 +++----
 net/core/netpoll.c      | 4 ----
 2 files changed, 3 insertions(+), 8 deletions(-)
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index e3d79593fb3a..a0525a1f4715 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -25,7 +25,6 @@ struct netpoll {
 
 struct netpoll_info {
 	atomic_t refcnt;
-	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
 	struct sk_buff_head arp_tx; /* list of arp requests to reply to */
@@ -51,12 +50,12 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	unsigned long flags;
 	int ret = 0;
 
-	if (!npinfo || (!npinfo->rx_np && !npinfo->rx_flags))
+	if (!npinfo || !npinfo->rx_np)
 		return 0;
 
 	spin_lock_irqsave(&npinfo->rx_lock, flags);
-	/* check rx_flags again with the lock held */
-	if (npinfo->rx_flags && __netpoll_rx(skb))
+	/* check rx_np again with the lock held */
+	if (npinfo->rx_np && __netpoll_rx(skb))
 		ret = 1;
 	spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 9e3aea0bd369..b1d5acd2fc7a 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -39,7 +39,6 @@ static struct sk_buff_head skb_pool;
 static atomic_t trapped;
 
 #define USEC_PER_POLL	50
-#define NETPOLL_RX_ENABLED	1
 
 #define MAX_SKB_SIZE \
 	(MAX_UDP_CHUNK + sizeof(struct udphdr) + \
@@ -675,7 +674,6 @@ int netpoll_setup(struct netpoll *np)
 		goto release;
 	}
 
-	npinfo->rx_flags = 0;
 	npinfo->rx_np = NULL;
 
 	spin_lock_init(&npinfo->rx_lock);
@@ -757,7 +755,6 @@ int netpoll_setup(struct netpoll *np)
 
 	if (np->rx_hook) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
-		npinfo->rx_flags |= NETPOLL_RX_ENABLED;
 		npinfo->rx_np = np;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
@@ -799,7 +796,6 @@ void netpoll_cleanup(struct netpoll *np)
 	if (npinfo->rx_np == np) {
 		spin_lock_irqsave(&npinfo->rx_lock, flags);
 		npinfo->rx_np = NULL;
-		npinfo->rx_flags &= ~NETPOLL_RX_ENABLED;
 		spin_unlock_irqrestore(&npinfo->rx_lock, flags);
 	}
 
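---

For context, after this change the rx_np pointer alone gates the netpoll receive path: it is set under rx_lock in netpoll_setup() when a rx_hook is registered and cleared in netpoll_cleanup(). A sketch of the resulting netpoll_rx() inline, reconstructed from the hunk above; the skb->dev->npinfo lookup and the final return are not visible in the diff context and are assumptions:

static inline int netpoll_rx(struct sk_buff *skb)
{
	struct netpoll_info *npinfo = skb->dev->npinfo;	/* assumed: per-device netpoll state */
	unsigned long flags;
	int ret = 0;

	/* Fast path: no netpoll state, or no netpoll registered an rx_hook,
	 * so the packet is not consumed by netpoll. */
	if (!npinfo || !npinfo->rx_np)
		return 0;

	spin_lock_irqsave(&npinfo->rx_lock, flags);
	/* check rx_np again with the lock held */
	if (npinfo->rx_np && __netpoll_rx(skb))
		ret = 1;
	spin_unlock_irqrestore(&npinfo->rx_lock, flags);

	return ret;
}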