author    Alexander Duyck <alexander.h.duyck@intel.com>    2017-03-24 13:07:53 -0400
committer David S. Miller <davem@davemloft.net>    2017-03-24 23:49:30 -0400
commit    545cd5e5ec5477c325e4098b6fd21213dceda408 (patch)
tree      43a2aaa0f59d8721b8ab25d0f52749f798f42122
parent    dcb421f4279f362c3eef7479616c76588b74d782 (diff)
net: Busy polling should ignore sender CPUs
This patch is a cleanup/fix for NAPI IDs following the changes that improved how sender_cpu and napi_id share the same location in the sk_buff.

One issue I found is that we were not validating the napi_id before trying to set up busy polling. This change corrects that by using the MIN_NAPI_ID value, which is now used both when allocating NAPI IDs and when validating them.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
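As background, the ID-space split the patch relies on can be sketched in a few lines of standalone C. This is not kernel code: NR_CPUS is stood in by an arbitrary fixed value, and is_valid_napi_id() is a hypothetical helper mirroring the check the patch adds to sk_busy_loop().

#include <stdio.h>

#define NR_CPUS 64 /* stand-in for the kernel's compile-time CPU limit */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

/* Mirrors the new validity check: 0 means "not set", 1..NR_CPUS is the
 * sender_cpu region, and only values >= MIN_NAPI_ID are real NAPI IDs.
 */
static int is_valid_napi_id(unsigned int id)
{
        return id >= MIN_NAPI_ID;
}

int main(void)
{
        unsigned int samples[] = { 0, 1, NR_CPUS, MIN_NAPI_ID, 4096 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                printf("id %u -> %s\n", samples[i],
                       is_valid_napi_id(samples[i]) ? "NAPI ID" : "reserved");
        return 0;
}

With this split, a socket that has only ever stored a sender_cpu value can never be mistaken for one carrying a valid NAPI ID.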
-rw-r--r--  include/net/busy_poll.h |  9
-rw-r--r--  net/core/dev.c          | 13
2 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c0452de83086..3fcda9e70c3f 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -35,6 +35,12 @@ struct napi_struct;
 extern unsigned int sysctl_net_busy_read __read_mostly;
 extern unsigned int sysctl_net_busy_poll __read_mostly;
 
+/* 0 - Reserved to indicate value not set
+ * 1..NR_CPUS - Reserved for sender_cpu
+ * NR_CPUS+1..~0 - Region available for NAPI IDs
+ */
+#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+
 static inline bool net_busy_loop_on(void)
 {
 	return sysctl_net_busy_poll;
@@ -58,10 +64,9 @@ static inline unsigned long busy_loop_end_time(void)
 
 static inline bool sk_can_busy_loop(const struct sock *sk)
 {
-	return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current);
+	return sk->sk_ll_usec && !signal_pending(current);
 }
 
-
 static inline bool busy_loop_timeout(unsigned long end_time)
 {
 	unsigned long now = busy_loop_us_clock();
diff --git a/net/core/dev.c b/net/core/dev.c
index 7869ae3837ca..ab337bf5bbf4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5066,15 +5066,20 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 	int (*napi_poll)(struct napi_struct *napi, int budget);
 	void *have_poll_lock = NULL;
 	struct napi_struct *napi;
+	unsigned int napi_id;
 	int rc;
 
 restart:
+	napi_id = READ_ONCE(sk->sk_napi_id);
+	if (napi_id < MIN_NAPI_ID)
+		return 0;
+
 	rc = false;
 	napi_poll = NULL;
 
 	rcu_read_lock();
 
-	napi = napi_by_id(sk->sk_napi_id);
+	napi = napi_by_id(napi_id);
 	if (!napi)
 		goto out;
 
@@ -5143,10 +5148,10 @@ static void napi_hash_add(struct napi_struct *napi)
 
 	spin_lock(&napi_hash_lock);
 
-	/* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+	/* 0..NR_CPUS range is reserved for sender_cpu use */
 	do {
-		if (unlikely(++napi_gen_id < NR_CPUS + 1))
-			napi_gen_id = NR_CPUS + 1;
+		if (unlikely(++napi_gen_id < MIN_NAPI_ID))
+			napi_gen_id = MIN_NAPI_ID;
 	} while (napi_by_id(napi_gen_id));
 	napi->napi_id = napi_gen_id;
 
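For illustration, the generator loop above can be modeled in userspace. This is a sketch, not kernel code: napi_gen_id starts at an artificial value to force a wrap, id_in_use() is a stub standing in for the napi_by_id() hash lookup, and the kernel's unlikely() annotation is dropped.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64 /* stand-in for the kernel constant */
#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))

static unsigned int napi_gen_id = (unsigned int)-1; /* force a wrap on first use */

static bool id_in_use(unsigned int id)
{
        (void)id;
        return false; /* stub for the napi_by_id() hash lookup */
}

static unsigned int alloc_napi_id(void)
{
        /* Same shape as the loop in napi_hash_add(): on unsigned
         * overflow the counter lands in 0..NR_CPUS and is pulled back
         * up to MIN_NAPI_ID, so sender_cpu values are never handed out
         * as NAPI IDs.
         */
        do {
                if (++napi_gen_id < MIN_NAPI_ID)
                        napi_gen_id = MIN_NAPI_ID;
        } while (id_in_use(napi_gen_id));
        return napi_gen_id;
}

int main(void)
{
        printf("id after wrap: %u (MIN_NAPI_ID is %u)\n",
               alloc_napi_id(), MIN_NAPI_ID);
        return 0;
}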