author     Steve Hodgson <shodgson@solarflare.com>    2010-06-01 07:19:39 -0400
committer  David S. Miller <davem@davemloft.net>      2010-06-02 05:21:08 -0400
commit     90d683afd1395016775c8d90508614f8d3000b81 (patch)
tree       33c239c5cd1e607a22dca1af0fd3615cee584ef9 /drivers/net/sfc
parent     d730dc527a5abd4717f6320e82cfce54edc882a3 (diff)
sfc: Remove efx_rx_queue::add_lock
Ensure that efx_fast_push_rx_descriptors() only runs from efx_process_channel() [NAPI], or when napi_disable() has been executed.

Reimplement the slow fill by sending an event to the channel, so that NAPI runs, and hanging the subsequent fast fill off the event handler. Replace the sfc_refill workqueue and delayed work items with a timer. We do not need to stop this timer in efx_flush_all() because it is always safe to send the event; receiving it will simply be delayed until NAPI is restarted.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
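For orientation before reading the diff, the new slow-fill path condenses to the three functions below. This is only a condensed excerpt of code added by the patch itself (not a standalone build unit), and the test-event branch of the generated-event handler is omitted here for brevity.

/* Out of memory with the RX ring empty: arm the per-queue timer
 * (this replaces queueing work onto the old sfc_refill workqueue). */
void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
{
	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
}

/* Timer callback: post a driver-generated "fill" event so that NAPI runs. */
void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;

	efx_nic_generate_fill_event(rx_queue->channel);
	++rx_queue->slow_fill_count;
}

/* Event handler, reached from efx_process_channel() [NAPI]: the queue is
 * empty, so no RX completion will trigger a refill; refill it here. */
static void
efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
{
	struct efx_nic *efx = channel->efx;
	unsigned code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);

	if (code == EFX_CHANNEL_MAGIC_FILL(channel))
		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
}

The timer is deleted with del_timer_sync() in efx_fini_rx_queue(), and efx_flush_all() no longer has to cancel anything: a stray fill event is simply processed the next time NAPI runs.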
Diffstat (limited to 'drivers/net/sfc')
-rw-r--r--  drivers/net/sfc/efx.c          | 44
-rw-r--r--  drivers/net/sfc/efx.h          |  4
-rw-r--r--  drivers/net/sfc/net_driver.h   | 10
-rw-r--r--  drivers/net/sfc/nic.c          | 49
-rw-r--r--  drivers/net/sfc/nic.h          |  1
-rw-r--r--  drivers/net/sfc/rx.c           | 95
6 files changed, 73 insertions, 130 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d1a1d32e73ee..5d9ef05e6abb 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -93,13 +93,6 @@ const char *efx_reset_type_names[] = {
 
 #define EFX_MAX_MTU (9 * 1024)
 
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
  * queued onto this work queue. This is not a per-nic work queue, because
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -516,11 +509,11 @@ static void efx_start_channel(struct efx_channel *channel)
 	channel->enabled = true;
 	smp_wmb();
 
-	napi_enable(&channel->napi_str);
-
-	/* Load up RX descriptors */
+	/* Fill the queues before enabling NAPI */
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_fast_push_rx_descriptors(rx_queue);
+
+	napi_enable(&channel->napi_str);
 }
 
 /* This disables event queue processing and packet transmission.
@@ -529,8 +522,6 @@ static void efx_start_channel(struct efx_channel *channel)
  */
 static void efx_stop_channel(struct efx_channel *channel)
 {
-	struct efx_rx_queue *rx_queue;
-
 	if (!channel->enabled)
 		return;
 
@@ -538,12 +529,6 @@ static void efx_stop_channel(struct efx_channel *channel)
 
 	channel->enabled = false;
 	napi_disable(&channel->napi_str);
-
-	/* Ensure that any worker threads have exited or will be no-ops */
-	efx_for_each_channel_rx_queue(rx_queue, channel) {
-		spin_lock_bh(&rx_queue->add_lock);
-		spin_unlock_bh(&rx_queue->add_lock);
-	}
 }
 
 static void efx_fini_channels(struct efx_nic *efx)
@@ -595,9 +580,9 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_remove_eventq(channel);
 }
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
-	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 
 /**************************************************************************
@@ -1242,15 +1227,8 @@ static void efx_start_all(struct efx_nic *efx)
  * since we're holding the rtnl_lock at this point. */
 static void efx_flush_all(struct efx_nic *efx)
 {
-	struct efx_rx_queue *rx_queue;
-
 	/* Make sure the hardware monitor is stopped */
 	cancel_delayed_work_sync(&efx->monitor_work);
-
-	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx)
-		cancel_delayed_work_sync(&rx_queue->work);
-
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
 }
@@ -2064,8 +2042,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		rx_queue->queue = i;
 		rx_queue->channel = &efx->channel[0]; /* for safety */
 		rx_queue->buffer = NULL;
-		spin_lock_init(&rx_queue->add_lock);
-		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+		setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+			    (unsigned long)rx_queue);
 	}
 
 	efx->type = type;
@@ -2436,11 +2414,6 @@ static int __init efx_init_module(void)
 	if (rc)
 		goto err_notifier;
 
-	refill_workqueue = create_workqueue("sfc_refill");
-	if (!refill_workqueue) {
-		rc = -ENOMEM;
-		goto err_refill;
-	}
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
 		rc = -ENOMEM;
@@ -2456,8 +2429,6 @@ static int __init efx_init_module(void)
  err_pci:
 	destroy_workqueue(reset_workqueue);
  err_reset:
-	destroy_workqueue(refill_workqueue);
- err_refill:
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
 	return rc;
@@ -2469,7 +2440,6 @@ static void __exit efx_exit_module(void)
 
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
-	destroy_workqueue(refill_workqueue);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index ffd708c5304a..e1e448887dfc 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -47,12 +47,12 @@ extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
 extern void efx_rx_strategy(struct efx_channel *channel);
 extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
-extern void efx_rx_work(struct work_struct *data);
+extern void efx_rx_slow_fill(unsigned long context);
 extern void __efx_rx_packet(struct efx_channel *channel,
 			    struct efx_rx_buffer *rx_buf, bool checksummed);
 extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
 			  unsigned int len, bool checksummed, bool discard);
-extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
+extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
 #define EFX_RXQ_SIZE 1024
 #define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
 
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index ee0ea01c847e..45398039dee6 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,6 +18,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/if_vlan.h>
+#include <linux/timer.h>
 #include <linux/mdio.h>
 #include <linux/list.h>
 #include <linux/pci.h>
@@ -242,10 +243,6 @@ struct efx_rx_buffer {
  * @added_count: Number of buffers added to the receive queue.
  * @notified_count: Number of buffers given to NIC (<= @added_count).
  * @removed_count: Number of buffers removed from the receive queue.
- * @add_lock: Receive queue descriptor add spin lock.
- *	This lock must be held in order to add buffers to the RX
- *	descriptor ring (rxd and buffer) and to update added_count (but
- *	not removed_count).
  * @max_fill: RX descriptor maximum fill level (<= ring size)
  * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
  *	(<= @max_fill)
@@ -259,7 +256,7 @@ struct efx_rx_buffer {
  *	overflow was observed. It should never be set.
  * @alloc_page_count: RX allocation strategy counter.
  * @alloc_skb_count: RX allocation strategy counter.
- * @work: Descriptor push work thread
+ * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
  * @buf_page: Page for next RX buffer.
  *	We can use a single page for multiple RX buffers. This tracks
  *	the remaining space in the allocation.
@@ -277,7 +274,6 @@ struct efx_rx_queue {
 	int added_count;
 	int notified_count;
 	int removed_count;
-	spinlock_t add_lock;
 	unsigned int max_fill;
 	unsigned int fast_fill_trigger;
 	unsigned int fast_fill_limit;
@@ -285,7 +281,7 @@ struct efx_rx_queue {
 	unsigned int min_overfill;
 	unsigned int alloc_page_count;
 	unsigned int alloc_skb_count;
-	struct delayed_work work;
+	struct timer_list slow_fill;
 	unsigned int slow_fill_count;
 
 	struct page *buf_page;
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index ca9cf1a33803..0ee6fd367e6f 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -79,10 +79,14 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
 /* Depth of RX flush request fifo */
 #define EFX_RX_FLUSH_COUNT 4
 
-/* Magic value for efx_generate_test_event() */
-#define EFX_CHANNEL_MAGIC(_channel)		\
+/* Generated event code for efx_generate_test_event() */
+#define EFX_CHANNEL_MAGIC_TEST(_channel)	\
 	(0x00010100 + (_channel)->channel)
 
+/* Generated event code for efx_generate_fill_event() */
+#define EFX_CHANNEL_MAGIC_FILL(_channel)	\
+	(0x00010200 + (_channel)->channel)
+
 /**************************************************************************
  *
  * Solarstorm hardware access
@@ -854,6 +858,26 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 		      checksummed, discard);
 }
 
+static void
+efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	unsigned code;
+
+	code = EFX_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
+	if (code == EFX_CHANNEL_MAGIC_TEST(channel))
+		++channel->magic_count;
+	else if (code == EFX_CHANNEL_MAGIC_FILL(channel))
+		/* The queue must be empty, so we won't receive any rx
+		 * events, so efx_process_channel() won't refill the
+		 * queue. Refill it here */
+		efx_fast_push_rx_descriptors(&efx->rx_queue[channel->channel]);
+	else
+		EFX_LOG(efx, "channel %d received generated "
+			"event "EFX_QWORD_FMT"\n", channel->channel,
+			EFX_QWORD_VAL(*event));
+}
+
 /* Global events are basically PHY events */
 static void
 efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
@@ -997,13 +1021,7 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
 		}
 		break;
 	case FSE_AZ_EV_CODE_DRV_GEN_EV:
-		if (EFX_QWORD_FIELD(event, FSF_AZ_DRV_GEN_EV_MAGIC)
-		    == EFX_CHANNEL_MAGIC(channel))
-			++channel->magic_count;
-
-		EFX_LOG(channel->efx, "channel %d received generated "
-			"event "EFX_QWORD_FMT"\n", channel->channel,
-			EFX_QWORD_VAL(event));
+		efx_handle_generated_event(channel, &event);
 		break;
 	case FSE_AZ_EV_CODE_GLOBAL_EV:
 		efx_handle_global_event(channel, &event);
@@ -1096,7 +1114,18 @@ void efx_nic_remove_eventq(struct efx_channel *channel)
 
 void efx_nic_generate_test_event(struct efx_channel *channel)
 {
-	unsigned int magic = EFX_CHANNEL_MAGIC(channel);
+	unsigned int magic = EFX_CHANNEL_MAGIC_TEST(channel);
+	efx_qword_t test_event;
+
+	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
+			     FSE_AZ_EV_CODE_DRV_GEN_EV,
+			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
+	efx_generate_event(channel, &test_event);
+}
+
+void efx_nic_generate_fill_event(struct efx_channel *channel)
+{
+	unsigned int magic = EFX_CHANNEL_MAGIC_FILL(channel);
 	efx_qword_t test_event;
 
 	EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 186aab564c4a..95770e15115d 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -191,6 +191,7 @@ extern int efx_nic_rx_xoff_thresh, efx_nic_rx_xon_thresh;
 extern int efx_nic_init_interrupt(struct efx_nic *efx);
 extern void efx_nic_enable_interrupts(struct efx_nic *efx);
 extern void efx_nic_generate_test_event(struct efx_channel *channel);
+extern void efx_nic_generate_fill_event(struct efx_channel *channel);
 extern void efx_nic_generate_interrupt(struct efx_nic *efx);
 extern void efx_nic_disable_interrupts(struct efx_nic *efx);
 extern void efx_nic_fini_interrupt(struct efx_nic *efx);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index e308818b9f55..bf1e55e7869e 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -276,28 +276,25 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
 /**
  * efx_fast_push_rx_descriptors - push new RX descriptors quickly
  * @rx_queue:		RX descriptor queue
- * @retry:		Recheck the fill level
  * This will aim to fill the RX descriptor queue up to
  * @rx_queue->@fast_fill_limit. If there is insufficient atomic
- * memory to do so, the caller should retry.
+ * memory to do so, a slow fill will be scheduled.
+ *
+ * The caller must provide serialisation (none is used here). In practise,
+ * this means this function must run from the NAPI handler, or be called
+ * when NAPI is disabled.
  */
-static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
-					   int retry)
+void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
 {
 	struct efx_rx_buffer *rx_buf;
 	unsigned fill_level, index;
 	int i, space, rc = 0;
 
-	/* Calculate current fill level. Do this outside the lock,
-	 * because most of the time we'll end up not wanting to do the
-	 * fill anyway.
-	 */
+	/* Calculate current fill level, and exit if we don't need to fill */
 	fill_level = (rx_queue->added_count - rx_queue->removed_count);
 	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
-
-	/* Don't fill if we don't need to */
 	if (fill_level >= rx_queue->fast_fill_trigger)
-		return 0;
+		return;
 
 	/* Record minimum fill level */
 	if (unlikely(fill_level < rx_queue->min_fill)) {
@@ -305,20 +302,9 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 		rx_queue->min_fill = fill_level;
 	}
 
-	/* Acquire RX add lock. If this lock is contended, then a fast
-	 * fill must already be in progress (e.g. in the refill
-	 * tasklet), so we don't need to do anything
-	 */
-	if (!spin_trylock_bh(&rx_queue->add_lock))
-		return -1;
-
- retry:
-	/* Recalculate current fill level now that we have the lock */
-	fill_level = (rx_queue->added_count - rx_queue->removed_count);
-	EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
 	space = rx_queue->fast_fill_limit - fill_level;
 	if (space < EFX_RX_BATCH)
-		goto out_unlock;
+		return;
 
 	EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from"
 		  " level %d to level %d using %s allocation\n",
@@ -330,8 +316,13 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
 			index = rx_queue->added_count & EFX_RXQ_MASK;
 			rx_buf = efx_rx_buffer(rx_queue, index);
 			rc = efx_init_rx_buffer(rx_queue, rx_buf);
-			if (unlikely(rc))
+			if (unlikely(rc)) {
+				/* Ensure that we don't leave the rx queue
+				 * empty */
+				if (rx_queue->added_count == rx_queue->removed_count)
+					efx_schedule_slow_fill(rx_queue);
 				goto out;
+			}
 			++rx_queue->added_count;
 		}
 	} while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH);
@@ -343,61 +334,16 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
  out:
 	/* Send write pointer to card. */
 	efx_nic_notify_rx_desc(rx_queue);
-
-	/* If the fast fill is running inside from the refill tasklet, then
-	 * for SMP systems it may be running on a different CPU to
-	 * RX event processing, which means that the fill level may now be
-	 * out of date. */
-	if (unlikely(retry && (rc == 0)))
-		goto retry;
-
- out_unlock:
-	spin_unlock_bh(&rx_queue->add_lock);
-
-	return rc;
 }
 
-/**
- * efx_fast_push_rx_descriptors - push new RX descriptors quickly
- * @rx_queue:		RX descriptor queue
- *
- * This will aim to fill the RX descriptor queue up to
- * @rx_queue->@fast_fill_limit. If there is insufficient memory to do so,
- * it will schedule a work item to immediately continue the fast fill
- */
-void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue)
-{
-	int rc;
-
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 0);
-	if (unlikely(rc)) {
-		/* Schedule the work item to run immediately. The hope is
-		 * that work is immediately pending to free some memory
-		 * (e.g. an RX event or TX completion)
-		 */
-		efx_schedule_slow_fill(rx_queue, 0);
-	}
-}
-
-void efx_rx_work(struct work_struct *data)
+void efx_rx_slow_fill(unsigned long context)
 {
-	struct efx_rx_queue *rx_queue;
-	int rc;
-
-	rx_queue = container_of(data, struct efx_rx_queue, work.work);
-
-	if (unlikely(!rx_queue->channel->enabled))
-		return;
-
-	EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU "
-		  "%d\n", rx_queue->queue, raw_smp_processor_id());
+	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
+	struct efx_channel *channel = rx_queue->channel;
 
+	/* Post an event to cause NAPI to run and refill the queue */
+	efx_nic_generate_fill_event(channel);
 	++rx_queue->slow_fill_count;
-	/* Push new RX descriptors, allowing at least 1 jiffy for
-	 * the kernel to free some more memory. */
-	rc = __efx_fast_push_rx_descriptors(rx_queue, 1);
-	if (rc)
-		efx_schedule_slow_fill(rx_queue, 1);
 }
 
 static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
@@ -682,6 +628,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
 
 	EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue);
 
+	del_timer_sync(&rx_queue->slow_fill);
 	efx_nic_fini_rx(rx_queue);
 
 	/* Release RX buffers NB start at index 0 not current HW ptr */