author    Jesse Brandeburg <jesse.brandeburg@intel.com>    2007-01-06 12:51:23 -0500
committer Auke Kok <juke-jan.h.kok@intel.com>              2007-01-06 12:51:23 -0500
commit    5d9278537502d2e404e85485d1b905814fe728c0 (patch)
tree      aa6f4d4493a8f67f08bd36f57d6f600a468c5655 /drivers/net/ixgb
parent    81f4e6c190a0fa016fd7eecaf76a5f95d121afc2 (diff)
ixgb: Fix early TSO completion
This fix was already merged in commit 96f9c2e277768099479fbed7c3b69c294b1fadef
but reverted in commit 989316ddfeafd0e8fb51a4d811383769ad62637a. After stress
testing we found that the fix does not add new regressions and works around a
TX hang spotted by several users.

Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Signed-off-by: Auke Kok <auke-jan.h.kok@intel.com>
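[Editorial illustration, not part of the commit] A minimal user-space C sketch of the length adjustment the workaround applies. The next_desc_size() helper and the 16 KiB per-descriptor limit are assumptions for illustration; the real logic lives inside ixgb_tx_map(). When a TSO frame's linear data would otherwise be consumed exactly by its final descriptor, that descriptor is shortened by 4 bytes so a small trailing sentinel descriptor absorbs the premature write-back.

/* Illustration only -- not driver code.  Sketch of the sentinel-descriptor
 * length adjustment, assuming a 16 KiB per-descriptor data limit. */
#include <stdio.h>

#define MAX_DATA_PER_TXD 16384u		/* assumed stand-in for IXGB_MAX_DATA_PER_TXD */

/* Hypothetical helper: size of the next TX descriptor for 'len' remaining
 * bytes of linear skb data. */
static unsigned int next_desc_size(unsigned int len, int tso, int nr_frags)
{
	unsigned int size = len < MAX_DATA_PER_TXD ? len : MAX_DATA_PER_TXD;

	/* Same condition as the patch: TSO frame, no page fragments, and this
	 * descriptor would finish the data.  Hold back 4 bytes so a trailing
	 * sentinel descriptor gets generated. */
	if (tso && !nr_frags && size == len && size > 8)
		size -= 4;
	return size;
}

int main(void)
{
	unsigned int len = 1448, count = 0;	/* one TSO segment worth of data */

	while (len) {
		unsigned int size = next_desc_size(len, 1, 0);
		printf("descriptor %u: %u bytes\n", count++, size);
		len -= size;
	}
	printf("descriptors used: %u\n", count);
	return 0;
}

With these assumptions, one 1448-byte segment maps to a 1444-byte descriptor followed by a 4-byte sentinel, i.e. one extra descriptor, which the DESC_NEEDED change below accounts for.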
Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--   drivers/net/ixgb/ixgb_main.c   18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49..70ac9d4a83bb 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	struct ixgb_buffer *buffer_info;
 	int len = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
+	unsigned int mss = skb_shinfo(skb)->gso_size;
 
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	while(len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		while(len) {
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && !nr_frags && size == len
+			             && size > 8))
+				size -= 4;
+
 			buffer_info->length = size;
 			buffer_info->dma =
 				pci_map_page(adapter->pdev,
@@ -1401,8 +1414,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
 			 (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->date */ + \
+	MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+	+ 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
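
[Editorial illustration, not part of the commit] A stand-alone C sketch of the worst-case descriptor accounting behind DESC_NEEDED, showing why the hunk above reserves one more slot. IXGB_MAX_TXD_PWR = 14, MAX_SKB_FRAGS = 18 and a 4 KiB PAGE_SIZE are assumptions here; the real values come from the driver headers and the kernel configuration.

/* Illustration only -- worst-case TX descriptor accounting with the extra
 * slot reserved for the sentinel TSO workaround.  Constant values below are
 * assumptions for the sake of the example. */
#include <stdio.h>

#define IXGB_MAX_TXD_PWR	14			/* assumed */
#define IXGB_MAX_DATA_PER_TXD	(1u << IXGB_MAX_TXD_PWR)
#define MAX_SKB_FRAGS		18u			/* assumed */
#define PAGE_SIZE		4096u			/* assumed */

#define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
			  (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))

int main(void)
{
	/* Reservation before the patch: linear data, one descriptor per page
	 * fragment, plus one context descriptor. */
	unsigned int before = TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) +
			      MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1;

	/* The patch reserves one more slot for the 4-byte sentinel descriptor
	 * the TSO workaround may append. */
	unsigned int after = before + 1;

	printf("DESC_NEEDED before: %u, after: %u\n", before, after);
	return 0;
}

With these assumed constants the reservation grows from 20 to 21 descriptors, matching the single extra descriptor produced when the sentinel workaround splits the last data segment.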