-rw-r--r--	drivers/net/sfc/efx.c	20
1 files changed, 14 insertions, 6 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index f6131e578b44..11ee0d4407c4 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -199,11 +199,12 @@ static inline int efx_process_channel(struct efx_channel *channel, int rx_quota)
  */
 static inline void efx_channel_processed(struct efx_channel *channel)
 {
-	/* Write to EVQ_RPTR_REG. If a new event arrived in a race
-	 * with finishing processing, a new interrupt will be raised.
-	 */
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we acknowledge the events we've seen. Make sure
+	 * it's cleared before then. */
 	channel->work_pending = 0;
-	smp_wmb(); /* Ensure channel updated before any new interrupt. */
+	smp_wmb();
+
 	falcon_eventq_read_ack(channel);
 }
 
@@ -427,9 +428,12 @@ static void efx_start_channel(struct efx_channel *channel)
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
 
+	/* The interrupt handler for this channel may set work_pending
+	 * as soon as we enable it. Make sure it's cleared before
+	 * then. Similarly, make sure it sees the enabled flag set. */
 	channel->work_pending = 0;
 	channel->enabled = 1;
-	smp_wmb(); /* ensure channel updated before first interrupt */
+	smp_wmb();
 
 	napi_enable(&channel->napi_str);
 
@@ -1332,13 +1336,17 @@ static int efx_net_stop(struct net_device *net_dev)
 	return 0;
 }
 
-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
 	struct efx_nic *efx = net_dev->priv;
 	struct efx_mac_stats *mac_stats = &efx->mac_stats;
 	struct net_device_stats *stats = &net_dev->stats;
 
+	/* Update stats if possible, but do not wait if another thread
+	 * is updating them (or resetting the NIC); slightly stale
+	 * stats are acceptable.
+	 */
 	if (!spin_trylock(&efx->stats_lock))
 		return stats;
 	if (efx->state == STATE_RUNNING) {
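
The two smp_wmb() calls above order process-context stores to work_pending and enabled against the channel's interrupt handler, which the new comments describe but this patch does not show. Below is a minimal sketch of that writer-side pattern, with a purely illustrative interrupt-side counterpart; the struct and function names are invented for the example and are not the sfc driver's own.

#include <asm/barrier.h>	/* smp_wmb() */

struct demo_channel {
	int work_pending;
	int enabled;
};

/* Process context: clear work_pending and set enabled *before* doing
 * anything that lets the interrupt handler run.  The write barrier keeps
 * both stores ahead of the "kick" that follows, so a work_pending = 1
 * stored by the handler cannot be overwritten by a late work_pending = 0. */
static void demo_channel_start(struct demo_channel *ch)
{
	ch->work_pending = 0;
	ch->enabled = 1;
	smp_wmb();
	/* ... enable NAPI / acknowledge the event queue here ... */
}

/* Interrupt context (illustrative only): may set work_pending as soon as
 * the channel is enabled, which is exactly the race the comments cover. */
static void demo_channel_interrupt(struct demo_channel *ch)
{
	if (ch->enabled) {
		ch->work_pending = 1;
		/* ... schedule the NAPI poll ... */
	}
}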
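
The last hunk relies on spin_trylock() so the driver's get_stats handler never blocks on the stats lock. A sketch of that pattern, assuming a cut-down device structure (the names below are illustrative, not the real efx_nic layout):

#include <linux/spinlock.h>
#include <linux/netdevice.h>

struct demo_nic {
	spinlock_t stats_lock;
	struct net_device_stats stats;
};

/* Non-blocking stats read: if another thread holds the lock (a stats
 * refresh or a NIC reset in progress), hand back the counters we already
 * have; slightly stale values are acceptable here. */
static struct net_device_stats *demo_get_stats(struct demo_nic *nic)
{
	if (!spin_trylock(&nic->stats_lock))
		return &nic->stats;

	/* ... refresh nic->stats from the hardware counters ... */

	spin_unlock(&nic->stats_lock);
	return &nic->stats;
}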