From a0d3bea3cf6c7c1b53a46432bd490b5dc784ca42 Mon Sep 17 00:00:00 2001
From: Alexey Dobriyan
Date: Thu, 11 Aug 2005 16:05:50 -0700
Subject: [NET]: Make skb->protocol __be16

There are many instances of

	skb->protocol = htons(ETH_P_*);
	skb->protocol = __constant_htons(ETH_P_*);

and

	skb->protocol = *_type_trans(...);

Most of *_type_trans() are already endian-annotated, so let's shift
attention to other warnings.

Signed-off-by: Alexey Dobriyan
Signed-off-by: David S. Miller
---
 include/linux/skbuff.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'include')

diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0061c9470482..948527e42a60 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -255,7 +255,7 @@ struct sk_buff {
 				nohdr:1;
 				/* 3 bits spare */
 	__u8			pkt_type;
-	__u16			protocol;
+	__be16			protocol;
 
 	void			(*destructor)(struct sk_buff *skb);
 #ifdef CONFIG_NETFILTER
--
cgit v1.2.2

From 0db1d6fc1ea051af49ebe03c503d23996a7c5bbb Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:25:54 -0700
Subject: [NETPOLL]: add retry timeout

Add limited retry logic to netpoll_send_skb.

Each time we attempt to send, decrement our per-device retry counter.
On every successful send, we reset the counter.

We delay 50us between attempts, with up to 20000 retries for a total of
1 second. After we've exhausted our retries, subsequent failed attempts
will try only once until reset by success.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 include/linux/netpoll.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index bcd0ac33f592..be68d94b03d5 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -26,6 +26,7 @@ struct netpoll {
 struct netpoll_info {
 	spinlock_t poll_lock;
 	int poll_owner;
+	int tries;
 	int rx_flags;
 	spinlock_t rx_lock;
 	struct netpoll *rx_np; /* netpoll that registered an rx_hook */
--
cgit v1.2.2
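The netpoll_send_skb() change itself lives in net/core/netpoll.c, which this
include/-limited log does not show. The fragment below is only a rough sketch
of the retry behaviour the commit message describes; the MAX_RETRIES name, the
stripped-down control flow (no xmit_lock handling or recursion checks), and
the placement of udelay() are assumptions rather than the committed code.

/* Sketch only: how the new npinfo->tries counter is meant to be used. */
#define MAX_RETRIES 20000			/* 20000 * 50us ~= 1 second */

static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
{
	struct netpoll_info *npinfo = np->dev->npinfo;
	int status;

	do {
		npinfo->tries--;		/* burn one attempt */

		status = np->dev->hard_start_xmit(skb, np->dev);
		if (status == 0) {
			npinfo->tries = MAX_RETRIES;	/* success: rearm full budget */
			return;
		}

		udelay(50);			/* transmit busy: back off briefly */
	} while (npinfo->tries > 0);

	/*
	 * Once tries reaches zero, this do-while body still runs exactly once
	 * per call, so an exhausted device retries just once until a
	 * successful send resets the counter.
	 */
}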
From 53fb95d3c14290fd6ee808b221e35493f096246f Mon Sep 17 00:00:00 2001
From: Matt Mackall
Date: Thu, 11 Aug 2005 19:27:43 -0700
Subject: [NETPOLL]: fix initialization/NAPI race

This fixes a race during initialization with the NAPI softirq
processing by using an RCU approach.

This race was discovered when refill_skbs() was added to the setup
code.

Signed-off-by: Matt Mackall
Signed-off-by: David S. Miller
---
 include/linux/netpoll.h | 19 +++++++++++++------
 1 file changed, 13 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h
index be68d94b03d5..5ade54a78dbb 100644
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -9,6 +9,7 @@
 #include <linux/netdevice.h>
 #include <linux/interrupt.h>
+#include <linux/rcupdate.h>
 #include <linux/list.h>
 
 struct netpoll;
@@ -61,25 +62,31 @@ static inline int netpoll_rx(struct sk_buff *skb)
 	return ret;
 }
 
-static inline void netpoll_poll_lock(struct net_device *dev)
+static inline void *netpoll_poll_lock(struct net_device *dev)
 {
+	rcu_read_lock(); /* deal with race on ->npinfo */
 	if (dev->npinfo) {
 		spin_lock(&dev->npinfo->poll_lock);
 		dev->npinfo->poll_owner = smp_processor_id();
+		return dev->npinfo;
 	}
+	return NULL;
 }
 
-static inline void netpoll_poll_unlock(struct net_device *dev)
+static inline void netpoll_poll_unlock(void *have)
 {
-	if (dev->npinfo) {
-		dev->npinfo->poll_owner = -1;
-		spin_unlock(&dev->npinfo->poll_lock);
+	struct netpoll_info *npi = have;
+
+	if (npi) {
+		npi->poll_owner = -1;
+		spin_unlock(&npi->poll_lock);
 	}
+	rcu_read_unlock();
 }
 
 #else
 #define netpoll_rx(a) 0
-#define netpoll_poll_lock(a)
+#define netpoll_poll_lock(a) 0
 #define netpoll_poll_unlock(a)
 #endif
--
cgit v1.2.2
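The NAPI poll loop that consumes these helpers sits in net/core/dev.c, again
outside this include/-limited log. The minimal caller sketch below uses
assumed names (poll_one_device, budget) purely for illustration: the opaque
cookie captured under rcu_read_lock() is what gets handed back to
netpoll_poll_unlock(), so the unlock path never re-reads dev->npinfo, which
may be set or cleared concurrently while netpoll is still initializing.

/* Sketch of a caller adapting to the cookie-returning API (assumed, not the
 * actual net/core/dev.c code). */
static void poll_one_device(struct net_device *dev, int *budget)
{
	void *have;

	have = netpoll_poll_lock(dev);	/* rcu_read_lock() + npinfo (or NULL) */
	dev->poll(dev, budget);		/* run the driver's ->poll() routine */
	netpoll_poll_unlock(have);	/* unlock via saved pointer, then rcu_read_unlock() */
}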