author		Steve Hodgson <shodgson@solarflare.com>	2010-06-01 07:19:39 -0400
committer	David S. Miller <davem@davemloft.net>	2010-06-02 05:21:08 -0400
commit		90d683afd1395016775c8d90508614f8d3000b81 (patch)
tree		33c239c5cd1e607a22dca1af0fd3615cee584ef9 /drivers/net/sfc/efx.c
parent		d730dc527a5abd4717f6320e82cfce54edc882a3 (diff)
sfc: Remove efx_rx_queue::add_lock
Ensure that efx_fast_push_rx_descriptors() must only run from
efx_process_channel() [NAPI], or when napi_disable() has been executed.

Reimplement the slow fill by sending an event to the channel, so that
NAPI runs, and hanging the subsequent fast fill off the event handler.
Replace the sfc_refill workqueue and delayed work items with a timer.
We do not need to stop this timer in efx_flush_all() because it's safe
to send the event always; receiving it will be delayed until NAPI is
restarted.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
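As a sketch of the mechanism described above: the timer callback does not
refill the queue directly (that would race with NAPI); it posts an event to
the channel so that the event handler, running in NAPI context, performs the
fast fill. efx_rx_slow_fill is named in the diff below, but its body is not
part of this file's diff, and efx_nic_generate_fill_event() is an assumed
helper here, so treat this as an illustration rather than the driver's code:

/* Assumed sketch of the slow-fill timer callback. It runs in timer
 * (softirq) context, so it must not touch the RX ring itself; instead
 * it generates a channel event, which causes NAPI to run and perform
 * the fast fill from efx_process_channel(). */
static void efx_rx_slow_fill(unsigned long context)
{
	struct efx_rx_queue *rx_queue = (struct efx_rx_queue *)context;
	struct efx_channel *channel = rx_queue->channel;

	/* Kick the channel's event queue; NAPI will run and refill. */
	efx_nic_generate_fill_event(channel);
}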
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r--	drivers/net/sfc/efx.c	44
1 file changed, 7 insertions(+), 37 deletions(-)
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index d1a1d32e73ee..5d9ef05e6abb 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -93,13 +93,6 @@ const char *efx_reset_type_names[] = {
 
 #define EFX_MAX_MTU (9 * 1024)
 
-/* RX slow fill workqueue. If memory allocation fails in the fast path,
- * a work item is pushed onto this work queue to retry the allocation later,
- * to avoid the NIC being starved of RX buffers. Since this is a per cpu
- * workqueue, there is nothing to be gained in making it per NIC
- */
-static struct workqueue_struct *refill_workqueue;
-
 /* Reset workqueue. If any NIC has a hardware failure then a reset will be
  * queued onto this work queue. This is not a per-nic work queue, because
  * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
@@ -516,11 +509,11 @@ static void efx_start_channel(struct efx_channel *channel)
 	channel->enabled = true;
 	smp_wmb();
 
-	napi_enable(&channel->napi_str);
-
-	/* Load up RX descriptors */
+	/* Fill the queues before enabling NAPI */
 	efx_for_each_channel_rx_queue(rx_queue, channel)
 		efx_fast_push_rx_descriptors(rx_queue);
+
+	napi_enable(&channel->napi_str);
 }
 
 /* This disables event queue processing and packet transmission.
@@ -529,8 +522,6 @@ static void efx_start_channel(struct efx_channel *channel)
  */
 static void efx_stop_channel(struct efx_channel *channel)
 {
-	struct efx_rx_queue *rx_queue;
-
 	if (!channel->enabled)
 		return;
 
@@ -538,12 +529,6 @@ static void efx_stop_channel(struct efx_channel *channel)
 
 	channel->enabled = false;
 	napi_disable(&channel->napi_str);
-
-	/* Ensure that any worker threads have exited or will be no-ops */
-	efx_for_each_channel_rx_queue(rx_queue, channel) {
-		spin_lock_bh(&rx_queue->add_lock);
-		spin_unlock_bh(&rx_queue->add_lock);
-	}
 }
 
 static void efx_fini_channels(struct efx_nic *efx)
@@ -595,9 +580,9 @@ static void efx_remove_channel(struct efx_channel *channel)
 	efx_remove_eventq(channel);
 }
 
-void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay)
+void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue)
 {
-	queue_delayed_work(refill_workqueue, &rx_queue->work, delay);
+	mod_timer(&rx_queue->slow_fill, jiffies + msecs_to_jiffies(100));
 }
 
 /**************************************************************************
@@ -1242,15 +1227,8 @@ static void efx_start_all(struct efx_nic *efx)
  * since we're holding the rtnl_lock at this point. */
 static void efx_flush_all(struct efx_nic *efx)
 {
-	struct efx_rx_queue *rx_queue;
-
 	/* Make sure the hardware monitor is stopped */
 	cancel_delayed_work_sync(&efx->monitor_work);
-
-	/* Ensure that all RX slow refills are complete. */
-	efx_for_each_rx_queue(rx_queue, efx)
-		cancel_delayed_work_sync(&rx_queue->work);
-
 	/* Stop scheduled port reconfigurations */
 	cancel_work_sync(&efx->mac_work);
 }
@@ -2064,8 +2042,8 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
 		rx_queue->queue = i;
 		rx_queue->channel = &efx->channel[0]; /* for safety */
 		rx_queue->buffer = NULL;
-		spin_lock_init(&rx_queue->add_lock);
-		INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work);
+		setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
+			    (unsigned long)rx_queue);
 	}
 
 	efx->type = type;
@@ -2436,11 +2414,6 @@ static int __init efx_init_module(void)
 	if (rc)
 		goto err_notifier;
 
-	refill_workqueue = create_workqueue("sfc_refill");
-	if (!refill_workqueue) {
-		rc = -ENOMEM;
-		goto err_refill;
-	}
 	reset_workqueue = create_singlethread_workqueue("sfc_reset");
 	if (!reset_workqueue) {
 		rc = -ENOMEM;
@@ -2456,8 +2429,6 @@ static int __init efx_init_module(void)
  err_pci:
 	destroy_workqueue(reset_workqueue);
  err_reset:
-	destroy_workqueue(refill_workqueue);
- err_refill:
 	unregister_netdevice_notifier(&efx_netdev_notifier);
  err_notifier:
 	return rc;
@@ -2469,7 +2440,6 @@ static void __exit efx_exit_module(void)
 
 	pci_unregister_driver(&efx_pci_driver);
 	destroy_workqueue(reset_workqueue);
-	destroy_workqueue(refill_workqueue);
 	unregister_netdevice_notifier(&efx_netdev_notifier);
 
 }
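Note that while the timer is deliberately not stopped in efx_flush_all()
(sending the event is always safe), it must still be cancelled when the queue
itself is torn down. A minimal sketch of that teardown, assuming it lives in
the RX queue fini path in rx.c (not part of this file's diff):

void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
{
	/* Assumed teardown: ensure a pending slow-fill timer cannot fire
	 * once the queue's buffers are being freed. */
	del_timer_sync(&rx_queue->slow_fill);

	/* ... free RX buffers and the descriptor ring ... */
}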