aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/sfc/efx.c
diff options
context:
space:
mode:
author: Ben Hutchings <bhutchings@solarflare.com> 2009-11-25 11:11:35 -0500
committer: David S. Miller <davem@davemloft.net> 2009-11-26 18:59:36 -0500
commit: 55edc6e6ff728681ebc10d418222740705376664 (patch)
tree66136e674adde15b9668f13d4e0486482b7f1851 /drivers/net/sfc/efx.c
parent1dfc5ceacd00365a9089e98643f4b26253d5a6aa (diff)
sfc: Split MAC stats DMA initiation and completion
From: Steve Hodgson <shodgson@solarflare.com>

Currently we initiate MAC stats DMA and busy-wait for completion when
stats are requested. We can improve on this with a periodic timer to
initiate and poll for stats, and opportunistically poll when stats are
requested.

Since efx_nic::stats_disable_count and efx_stats_{disable,enable}() are
Falcon-specific, rename them and move them accordingly.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/sfc/efx.c')
-rw-r--r-- drivers/net/sfc/efx.c | 48
1 files changed, 13 insertions, 35 deletions
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 155aa1cca366..41ca5dbb4c44 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -637,6 +637,7 @@ void __efx_reconfigure_port(struct efx_nic *efx)
637 netif_addr_unlock_bh(efx->net_dev); 637 netif_addr_unlock_bh(efx->net_dev);
638 } 638 }
639 639
640 falcon_stop_nic_stats(efx);
640 falcon_deconfigure_mac_wrapper(efx); 641 falcon_deconfigure_mac_wrapper(efx);
641 642
642 /* Reconfigure the PHY, disabling transmit in mac level loopback. */ 643 /* Reconfigure the PHY, disabling transmit in mac level loopback. */
@@ -651,6 +652,8 @@ void __efx_reconfigure_port(struct efx_nic *efx)
651 652
652 efx->mac_op->reconfigure(efx); 653 efx->mac_op->reconfigure(efx);
653 654
655 falcon_start_nic_stats(efx);
656
654 /* Inform kernel of loss/gain of carrier */ 657 /* Inform kernel of loss/gain of carrier */
655 efx_link_status_changed(efx); 658 efx_link_status_changed(efx);
656 return; 659 return;
@@ -749,7 +752,6 @@ static int efx_init_port(struct efx_nic *efx)
749 efx->mac_op->reconfigure(efx); 752 efx->mac_op->reconfigure(efx);
750 753
751 efx->port_initialized = true; 754 efx->port_initialized = true;
752 efx_stats_enable(efx);
753 755
754 mutex_unlock(&efx->mac_lock); 756 mutex_unlock(&efx->mac_lock);
755 return 0; 757 return 0;
@@ -802,7 +804,6 @@ static void efx_fini_port(struct efx_nic *efx)
802 if (!efx->port_initialized) 804 if (!efx->port_initialized)
803 return; 805 return;
804 806
805 efx_stats_disable(efx);
806 efx->phy_op->fini(efx); 807 efx->phy_op->fini(efx);
807 efx->port_initialized = false; 808 efx->port_initialized = false;
808 809
@@ -1158,6 +1159,8 @@ static void efx_start_all(struct efx_nic *efx)
1158 if (efx->state == STATE_RUNNING) 1159 if (efx->state == STATE_RUNNING)
1159 queue_delayed_work(efx->workqueue, &efx->monitor_work, 1160 queue_delayed_work(efx->workqueue, &efx->monitor_work,
1160 efx_monitor_interval); 1161 efx_monitor_interval);
1162
1163 falcon_start_nic_stats(efx);
1161} 1164}
1162 1165
1163/* Flush all delayed work. Should only be called when no more delayed work 1166/* Flush all delayed work. Should only be called when no more delayed work
@@ -1195,6 +1198,8 @@ static void efx_stop_all(struct efx_nic *efx)
1195 if (!efx->port_enabled) 1198 if (!efx->port_enabled)
1196 return; 1199 return;
1197 1200
1201 falcon_stop_nic_stats(efx);
1202
1198 /* Disable interrupts and wait for ISR to complete */ 1203 /* Disable interrupts and wait for ISR to complete */
1199 falcon_disable_interrupts(efx); 1204 falcon_disable_interrupts(efx);
1200 if (efx->legacy_irq) 1205 if (efx->legacy_irq)
@@ -1438,20 +1443,6 @@ static int efx_net_stop(struct net_device *net_dev)
1438 return 0; 1443 return 0;
1439} 1444}
1440 1445
1441void efx_stats_disable(struct efx_nic *efx)
1442{
1443 spin_lock(&efx->stats_lock);
1444 ++efx->stats_disable_count;
1445 spin_unlock(&efx->stats_lock);
1446}
1447
1448void efx_stats_enable(struct efx_nic *efx)
1449{
1450 spin_lock(&efx->stats_lock);
1451 --efx->stats_disable_count;
1452 spin_unlock(&efx->stats_lock);
1453}
1454
1455/* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1446/* Context: process, dev_base_lock or RTNL held, non-blocking. */
1456static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1447static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1457{ 1448{
@@ -1459,17 +1450,9 @@ static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
1459 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1450 struct efx_mac_stats *mac_stats = &efx->mac_stats;
1460 struct net_device_stats *stats = &net_dev->stats; 1451 struct net_device_stats *stats = &net_dev->stats;
1461 1452
1462 /* Update stats if possible, but do not wait if another thread 1453 spin_lock_bh(&efx->stats_lock);
1463 * is updating them or if MAC stats fetches are temporarily 1454 falcon_update_nic_stats(efx);
1464 * disabled; slightly stale stats are acceptable. 1455 spin_unlock_bh(&efx->stats_lock);
1465 */
1466 if (!spin_trylock(&efx->stats_lock))
1467 return stats;
1468 if (!efx->stats_disable_count) {
1469 efx->mac_op->update_stats(efx);
1470 falcon_update_nic_stats(efx);
1471 }
1472 spin_unlock(&efx->stats_lock);
1473 1456
1474 stats->rx_packets = mac_stats->rx_packets; 1457 stats->rx_packets = mac_stats->rx_packets;
1475 stats->tx_packets = mac_stats->tx_packets; 1458 stats->tx_packets = mac_stats->tx_packets;
@@ -1726,7 +1709,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method,
1726{ 1709{
1727 EFX_ASSERT_RESET_SERIALISED(efx); 1710 EFX_ASSERT_RESET_SERIALISED(efx);
1728 1711
1729 efx_stats_disable(efx);
1730 efx_stop_all(efx); 1712 efx_stop_all(efx);
1731 mutex_lock(&efx->mac_lock); 1713 mutex_lock(&efx->mac_lock);
1732 mutex_lock(&efx->spi_lock); 1714 mutex_lock(&efx->spi_lock);
@@ -1776,10 +1758,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method,
1776 mutex_unlock(&efx->spi_lock); 1758 mutex_unlock(&efx->spi_lock);
1777 mutex_unlock(&efx->mac_lock); 1759 mutex_unlock(&efx->mac_lock);
1778 1760
1779 if (ok) { 1761 if (ok)
1780 efx_start_all(efx); 1762 efx_start_all(efx);
1781 efx_stats_enable(efx);
1782 }
1783 return rc; 1763 return rc;
1784} 1764}
1785 1765
@@ -1977,7 +1957,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1977 efx->rx_checksum_enabled = true; 1957 efx->rx_checksum_enabled = true;
1978 spin_lock_init(&efx->netif_stop_lock); 1958 spin_lock_init(&efx->netif_stop_lock);
1979 spin_lock_init(&efx->stats_lock); 1959 spin_lock_init(&efx->stats_lock);
1980 efx->stats_disable_count = 1;
1981 mutex_init(&efx->mac_lock); 1960 mutex_init(&efx->mac_lock);
1982 efx->mac_op = &efx_dummy_mac_operations; 1961 efx->mac_op = &efx_dummy_mac_operations;
1983 efx->phy_op = &efx_dummy_phy_operations; 1962 efx->phy_op = &efx_dummy_phy_operations;
@@ -2219,9 +2198,8 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2219 goto fail4; 2198 goto fail4;
2220 } 2199 }
2221 2200
2222 /* Switch to the running state before we expose the device to 2201 /* Switch to the running state before we expose the device to the OS,
2223 * the OS. This is to ensure that the initial gathering of 2202 * so that dev_open()|efx_start_all() will actually start the device */
2224 * MAC stats succeeds. */
2225 efx->state = STATE_RUNNING; 2203 efx->state = STATE_RUNNING;
2226 2204
2227 rc = efx_register_netdev(efx); 2205 rc = efx_register_netdev(efx);