aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/ixgb
diff options
context:
space:
mode:
authorAuke Kok <auke-jan.h.kok@intel.com>2006-05-26 12:35:38 -0400
committerAuke Kok <auke-jan.h.kok@intel.com>2006-05-26 12:35:38 -0400
commit989316ddfeafd0e8fb51a4d811383769ad62637a (patch)
tree42ac64d569fcd2690b9db631272abc33cb08604b /drivers/net/ixgb
parent8556f0d18923495ffd15ce87089312b3d8f2414c (diff)
ixgb: revert an unwanted fix regarding tso/descriptors
There seemed to be another bug introduced as well as a performance hit with the addition of the sentinel descriptor workaround. Removal of this workaround appears to prevent the hang. We'll take a risk and remove it, as we had never seen the originally reported bug under linux. Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com> Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com> Signed-off-by: John Ronciak <john.ronciak@intel.com>
Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--drivers/net/ixgb/ixgb_main.c15
1 files changed, 1 insertions, 14 deletions
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 26c777f1f565..5561ab6e9d36 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1295,7 +1295,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1295 struct ixgb_buffer *buffer_info; 1295 struct ixgb_buffer *buffer_info;
1296 int len = skb->len; 1296 int len = skb->len;
1297 unsigned int offset = 0, size, count = 0, i; 1297 unsigned int offset = 0, size, count = 0, i;
1298 unsigned int mss = skb_shinfo(skb)->tso_size;
1299 1298
1300 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1299 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
1301 unsigned int f; 1300 unsigned int f;
@@ -1307,11 +1306,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1307 while(len) { 1306 while(len) {
1308 buffer_info = &tx_ring->buffer_info[i]; 1307 buffer_info = &tx_ring->buffer_info[i];
1309 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); 1308 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1310 /* Workaround for premature desc write-backs
1311 * in TSO mode. Append 4-byte sentinel desc */
1312 if(unlikely(mss && !nr_frags && size == len && size > 8))
1313 size -= 4;
1314
1315 buffer_info->length = size; 1309 buffer_info->length = size;
1316 buffer_info->dma = 1310 buffer_info->dma =
1317 pci_map_single(adapter->pdev, 1311 pci_map_single(adapter->pdev,
@@ -1337,12 +1331,6 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
1337 while(len) { 1331 while(len) {
1338 buffer_info = &tx_ring->buffer_info[i]; 1332 buffer_info = &tx_ring->buffer_info[i];
1339 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE); 1333 size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
1340 /* Workaround for premature desc write-backs
1341 * in TSO mode. Append 4-byte sentinel desc */
1342 if(unlikely(mss && (f == (nr_frags-1)) && (size == len)
1343 && (size > 8)))
1344 size -= 4;
1345
1346 buffer_info->length = size; 1334 buffer_info->length = size;
1347 buffer_info->dma = 1335 buffer_info->dma =
1348 pci_map_page(adapter->pdev, 1336 pci_map_page(adapter->pdev,
@@ -1421,8 +1409,7 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
1421#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \ 1409#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
1422 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 1410 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
1423#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \ 1411#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
1424 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 \ 1412 MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
1425 /* one more for TSO workaround */ + 1
1426 1413
1427static int 1414static int
1428ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 1415ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)