author     David S. Miller <davem@sunset.davemloft.net>  2007-10-30 00:28:47 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-30 01:37:28 -0400
commit     0a7606c121d58c1831805262c5b764e181429e7d (patch)
tree       4ba68e147c569c83dfedc3b45edf88ce21cde001 /net
parent     b0a713e9e6091b30d0e615d2be88017a57f37c76 (diff)
[NET]: Fix race between poll_napi() and net_rx_action()
netpoll_poll_lock() synchronizes the ->poll() invocation code paths, but
once we have the lock we have to make sure that NAPI_STATE_SCHED is still
set.  Otherwise we get:

    cpu 0                  cpu 1

    net_rx_action()        poll_napi()
    netpoll_poll_lock()    ... spin on ->poll_lock
    ->poll()
      netif_rx_complete
    netpoll_poll_unlock()  acquire ->poll_lock()
                           ->poll()
                             netif_rx_complete()
                             CRASH

Based upon a bug report from Tina Yang.

Signed-off-by: David S. Miller <davem@davemloft.net>
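The pattern is easier to see in isolation. Below is a minimal user-space
analogue of the fix, assuming a pthread mutex in place of napi->poll_lock;
fake_napi, fake_poll() and FAKE_SCHED are invented names for illustration,
not kernel API. Both pollers re-check the SCHED bit after taking the lock,
so exactly one of them makes the poll() call for a given schedule:

/* Minimal user-space sketch of the fix: re-check the SCHED bit under
 * the lock before calling poll().  fake_napi, fake_poll() and
 * FAKE_SCHED are invented for illustration; a pthread mutex stands in
 * for napi->poll_lock.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define FAKE_SCHED 0x1

struct fake_napi {
	pthread_mutex_t poll_lock;
	unsigned int state;
	int completions;	/* times poll() "completed" */
};

static void fake_poll(struct fake_napi *n)
{
	/* Analogue of netif_rx_complete(): must run at most once
	 * per schedule, or we get the CRASH in the changelog. */
	n->state &= ~FAKE_SCHED;
	n->completions++;
}

static void *poller(void *arg)
{
	struct fake_napi *n = arg;

	pthread_mutex_lock(&n->poll_lock);
	/* The fix: only the path that still sees SCHED set under
	 * the lock may invoke poll(); the loser backs off. */
	if (n->state & FAKE_SCHED)
		fake_poll(n);
	pthread_mutex_unlock(&n->poll_lock);
	return NULL;
}

int main(void)
{
	struct fake_napi n = {
		.poll_lock = PTHREAD_MUTEX_INITIALIZER,
		.state = FAKE_SCHED,
	};
	pthread_t a, b;

	pthread_create(&a, NULL, poller, &n);
	pthread_create(&b, NULL, poller, &n);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Exactly one thread completes; without the re-check both
	 * could, mirroring the double netif_rx_complete() above. */
	printf("completions = %d\n", n.completions);
	return 0;
}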
Diffstat (limited to 'net')
-rw-r--r--  net/core/dev.c      10
-rw-r--r--  net/core/netpoll.c  37
2 files changed, 37 insertions(+), 10 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 853c8b575f1d..02e7d8377c4a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2172,7 +2172,15 @@ static void net_rx_action(struct softirq_action *h)
 
 		weight = n->weight;
 
-		work = n->poll(n, weight);
+		/* This NAPI_STATE_SCHED test is for avoiding a race
+		 * with netpoll's poll_napi().  Only the entity which
+		 * obtains the lock and sees NAPI_STATE_SCHED set will
+		 * actually make the ->poll() call.  Therefore we avoid
+		 * accidently calling ->poll() when NAPI is not scheduled.
+		 */
+		work = 0;
+		if (test_bit(NAPI_STATE_SCHED, &n->state))
+			work = n->poll(n, weight);
 
 		WARN_ON_ONCE(work > weight);
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index bf8d18f1b013..c499b5c69bed 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -116,6 +116,29 @@ static __sum16 checksum_udp(struct sk_buff *skb, struct udphdr *uh,
  * network adapter, forcing superfluous retries and possibly timeouts.
  * Thus, we set our budget to greater than 1.
  */
+static int poll_one_napi(struct netpoll_info *npinfo,
+			 struct napi_struct *napi, int budget)
+{
+	int work;
+
+	/* net_rx_action's ->poll() invocations and our's are
+	 * synchronized by this test which is only made while
+	 * holding the napi->poll_lock.
+	 */
+	if (!test_bit(NAPI_STATE_SCHED, &napi->state))
+		return budget;
+
+	npinfo->rx_flags |= NETPOLL_RX_DROP;
+	atomic_inc(&trapped);
+
+	work = napi->poll(napi, budget);
+
+	atomic_dec(&trapped);
+	npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+
+	return budget - work;
+}
+
 static void poll_napi(struct netpoll *np)
 {
 	struct netpoll_info *npinfo = np->dev->npinfo;
@@ -123,17 +146,13 @@ static void poll_napi(struct netpoll *np)
 	int budget = 16;
 
 	list_for_each_entry(napi, &np->dev->napi_list, dev_list) {
-		if (test_bit(NAPI_STATE_SCHED, &napi->state) &&
-		    napi->poll_owner != smp_processor_id() &&
+		if (napi->poll_owner != smp_processor_id() &&
 		    spin_trylock(&napi->poll_lock)) {
-			npinfo->rx_flags |= NETPOLL_RX_DROP;
-			atomic_inc(&trapped);
-
-			napi->poll(napi, budget);
-
-			atomic_dec(&trapped);
-			npinfo->rx_flags &= ~NETPOLL_RX_DROP;
+			budget = poll_one_napi(npinfo, napi, budget);
 			spin_unlock(&napi->poll_lock);
+
+			if (!budget)
+				break;
 		}
 	}
 }
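One point worth noting about the new loop: poll_one_napi() returns the
budget left over after the ->poll() call, so poll_napi() now spreads a
single shrinking budget across every queue and stops as soon as it is
exhausted. A small stand-alone sketch of that accounting, with stub_poll()
and its packet counts invented purely for illustration:

#include <stdio.h>

/* Stand-in for napi->poll(): pretend each queue has 6 packets
 * pending and consume at most the budget we are given. */
static int stub_poll(int budget)
{
	int pending = 6;	/* invented workload */
	return pending < budget ? pending : budget;
}

/* Mirrors poll_one_napi(): do the work, return what is left. */
static int poll_one(int budget)
{
	int work = stub_poll(budget);
	return budget - work;
}

int main(void)
{
	int budget = 16;	/* same starting budget as poll_napi() */

	/* Mirrors the list_for_each_entry() walk: the budget shrinks
	 * across queues (16 -> 10 -> 4 -> 0) and the loop stops the
	 * moment it hits zero. */
	for (int i = 0; i < 4; i++) {
		budget = poll_one(budget);
		printf("after queue %d: budget = %d\n", i, budget);
		if (!budget)
			break;
	}
	return 0;
}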