about summary refs log tree commit diff stats
path: root/drivers/net/e1000e/netdev.c
diff options
context:
space:
mode:
authorJesse Brandeburg <jesse.brandeburg@intel.com>2009-01-19 09:17:08 -0500
committerDavid S. Miller <davem@davemloft.net>2009-01-23 02:38:12 -0500
commit92af3e95e4896452ab33b1841c3e9a9d50658064 (patch)
tree03d005d0957bf89bdb714ab165334a0ea98314c5 /drivers/net/e1000e/netdev.c
parent5ef3041e4a7cd817bc5ebbb0e5e956a2bdd32c38 (diff)
e1000e: drop lltx, remove unnecessary lock
LLTX is deprecated and complicated; don't use it. It was observed by Don Ash <donash4@gmail.com> that e1000e was acquiring this lock in the NAPI cleanup path. This is obviously a bug, as this is a leftover from when e1000 supported multiple tx queues and fake netdevs. Another user reported this to us, tested routing with the 2.6.27 kernel and this patch, and reported a 3.5% improvement in packets forwarded in a multi-port test on 82571 parts. Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com> Signed-off-by: Bruce Allan <bruce.w.allan@intel.com> Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/e1000e/netdev.c')
-rw-r--r--drivers/net/e1000e/netdev.c34
1 file changed, 3 insertions, 31 deletions
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 2ffd7523a91c..e04b392c9a59 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -47,7 +47,7 @@
47 47
48#include "e1000.h" 48#include "e1000.h"
49 49
50#define DRV_VERSION "0.3.3.3-k6" 50#define DRV_VERSION "0.3.3.4-k2"
51char e1000e_driver_name[] = "e1000e"; 51char e1000e_driver_name[] = "e1000e";
52const char e1000e_driver_version[] = DRV_VERSION; 52const char e1000e_driver_version[] = DRV_VERSION;
53 53
@@ -1698,7 +1698,6 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
1698 1698
1699 tx_ring->next_to_use = 0; 1699 tx_ring->next_to_use = 0;
1700 tx_ring->next_to_clean = 0; 1700 tx_ring->next_to_clean = 0;
1701 spin_lock_init(&adapter->tx_queue_lock);
1702 1701
1703 return 0; 1702 return 0;
1704err: 1703err:
@@ -2007,16 +2006,7 @@ static int e1000_clean(struct napi_struct *napi, int budget)
2007 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) 2006 !(adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2008 goto clean_rx; 2007 goto clean_rx;
2009 2008
2010 /* 2009 tx_cleaned = e1000_clean_tx_irq(adapter);
2011 * e1000_clean is called per-cpu. This lock protects
2012 * tx_ring from being cleaned by multiple cpus
2013 * simultaneously. A failure obtaining the lock means
2014 * tx_ring is currently being cleaned anyway.
2015 */
2016 if (spin_trylock(&adapter->tx_queue_lock)) {
2017 tx_cleaned = e1000_clean_tx_irq(adapter);
2018 spin_unlock(&adapter->tx_queue_lock);
2019 }
2020 2010
2021clean_rx: 2011clean_rx:
2022 adapter->clean_rx(adapter, &work_done, budget); 2012 adapter->clean_rx(adapter, &work_done, budget);
@@ -2922,8 +2912,6 @@ static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
2922 if (e1000_alloc_queues(adapter)) 2912 if (e1000_alloc_queues(adapter))
2923 return -ENOMEM; 2913 return -ENOMEM;
2924 2914
2925 spin_lock_init(&adapter->tx_queue_lock);
2926
2927 /* Explicitly disable IRQ since the NIC can be in any state. */ 2915 /* Explicitly disable IRQ since the NIC can be in any state. */
2928 e1000_irq_disable(adapter); 2916 e1000_irq_disable(adapter);
2929 2917
@@ -4069,7 +4057,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4069 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4057 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4070 unsigned int tx_flags = 0; 4058 unsigned int tx_flags = 0;
4071 unsigned int len = skb->len - skb->data_len; 4059 unsigned int len = skb->len - skb->data_len;
4072 unsigned long irq_flags;
4073 unsigned int nr_frags; 4060 unsigned int nr_frags;
4074 unsigned int mss; 4061 unsigned int mss;
4075 int count = 0; 4062 int count = 0;
@@ -4138,18 +4125,12 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4138 if (adapter->hw.mac.tx_pkt_filtering) 4125 if (adapter->hw.mac.tx_pkt_filtering)
4139 e1000_transfer_dhcp_info(adapter, skb); 4126 e1000_transfer_dhcp_info(adapter, skb);
4140 4127
4141 if (!spin_trylock_irqsave(&adapter->tx_queue_lock, irq_flags))
4142 /* Collision - tell upper layer to requeue */
4143 return NETDEV_TX_LOCKED;
4144
4145 /* 4128 /*
4146 * need: count + 2 desc gap to keep tail from touching 4129 * need: count + 2 desc gap to keep tail from touching
4147 * head, otherwise try next time 4130 * head, otherwise try next time
4148 */ 4131 */
4149 if (e1000_maybe_stop_tx(netdev, count + 2)) { 4132 if (e1000_maybe_stop_tx(netdev, count + 2))
4150 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4151 return NETDEV_TX_BUSY; 4133 return NETDEV_TX_BUSY;
4152 }
4153 4134
4154 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 4135 if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
4155 tx_flags |= E1000_TX_FLAGS_VLAN; 4136 tx_flags |= E1000_TX_FLAGS_VLAN;
@@ -4161,7 +4142,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4161 tso = e1000_tso(adapter, skb); 4142 tso = e1000_tso(adapter, skb);
4162 if (tso < 0) { 4143 if (tso < 0) {
4163 dev_kfree_skb_any(skb); 4144 dev_kfree_skb_any(skb);
4164 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4165 return NETDEV_TX_OK; 4145 return NETDEV_TX_OK;
4166 } 4146 }
4167 4147
@@ -4182,7 +4162,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4182 if (count < 0) { 4162 if (count < 0) {
4183 /* handle pci_map_single() error in e1000_tx_map */ 4163 /* handle pci_map_single() error in e1000_tx_map */
4184 dev_kfree_skb_any(skb); 4164 dev_kfree_skb_any(skb);
4185 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4186 return NETDEV_TX_OK; 4165 return NETDEV_TX_OK;
4187 } 4166 }
4188 4167
@@ -4193,7 +4172,6 @@ static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
4193 /* Make sure there is space in the ring for the next send. */ 4172 /* Make sure there is space in the ring for the next send. */
4194 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2); 4173 e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
4195 4174
4196 spin_unlock_irqrestore(&adapter->tx_queue_lock, irq_flags);
4197 return NETDEV_TX_OK; 4175 return NETDEV_TX_OK;
4198} 4176}
4199 4177
@@ -4922,12 +4900,6 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
4922 if (pci_using_dac) 4900 if (pci_using_dac)
4923 netdev->features |= NETIF_F_HIGHDMA; 4901 netdev->features |= NETIF_F_HIGHDMA;
4924 4902
4925 /*
4926 * We should not be using LLTX anymore, but we are still Tx faster with
4927 * it.
4928 */
4929 netdev->features |= NETIF_F_LLTX;
4930
4931 if (e1000e_enable_mng_pass_thru(&adapter->hw)) 4903 if (e1000e_enable_mng_pass_thru(&adapter->hw))
4932 adapter->flags |= FLAG_MNG_PT_ENABLED; 4904 adapter->flags |= FLAG_MNG_PT_ENABLED;
4933 4905