Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ixgb/ixgb.h          |  1
-rw-r--r--  drivers/net/ixgb/ixgb_ethtool.c  |  1
-rw-r--r--  drivers/net/ixgb/ixgb_hw.c       |  3
-rw-r--r--  drivers/net/ixgb/ixgb_main.c     | 57
4 files changed, 55 insertions(+), 7 deletions(-)
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 50ffe90488ff..f4aba4355b19 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -171,6 +171,7 @@ struct ixgb_adapter {
 
         /* TX */
         struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
+        unsigned int restart_queue;
         unsigned long timeo_start;
         uint32_t tx_cmd_type;
         uint64_t hw_csum_tx_good;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index cd22523fb035..82c044d6e08a 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -79,6 +79,7 @@ static struct ixgb_stats ixgb_gstrings_stats[] = {
         {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
         {"tx_deferred_ok", IXGB_STAT(stats.dc)},
         {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
+        {"tx_restart_queue", IXGB_STAT(restart_queue) },
         {"rx_long_length_errors", IXGB_STAT(stats.roc)},
         {"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 #ifdef NETIF_F_TSO
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 02089b64e42c..ecbf45861c68 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -399,8 +399,9 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
         /* Zero out the other 15 receive addresses. */
         DEBUGOUT("Clearing RAR[1-15]\n");
         for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
-                IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+                /* Write high reg first to disable the AV bit first */
                 IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+                IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
         }
 
         return;
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49..a083a9189230 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION "1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -1287,6 +1287,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
         struct ixgb_buffer *buffer_info;
         int len = skb->len;
         unsigned int offset = 0, size, count = 0, i;
+        unsigned int mss = skb_shinfo(skb)->gso_size;
 
         unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
         unsigned int f;
@@ -1298,6 +1299,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
         while(len) {
                 buffer_info = &tx_ring->buffer_info[i];
                 size = min(len, IXGB_MAX_DATA_PER_TXD);
+                /* Workaround for premature desc write-backs
+                 * in TSO mode.  Append 4-byte sentinel desc */
+                if (unlikely(mss && !nr_frags && size == len && size > 8))
+                        size -= 4;
+
                 buffer_info->length = size;
                 WARN_ON(buffer_info->dma != 0);
                 buffer_info->dma =
@@ -1324,6 +1330,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                 while(len) {
                         buffer_info = &tx_ring->buffer_info[i];
                         size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+                        /* Workaround for premature desc write-backs
+                         * in TSO mode.  Append 4-byte sentinel desc */
+                        if (unlikely(mss && !nr_frags && size == len
+                                     && size > 8))
+                                size -= 4;
+
                         buffer_info->length = size;
                         buffer_info->dma =
                                 pci_map_page(adapter->pdev,
@@ -1398,11 +1411,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
         IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+        struct ixgb_adapter *adapter = netdev_priv(netdev);
+        struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+        netif_stop_queue(netdev);
+        /* Herbert's original patch had:
+         *  smp_mb__after_netif_stop_queue();
+         * but since that doesn't exist yet, just open code it. */
+        smp_mb();
+
+        /* We need to check again in a case another CPU has just
+         * made room available. */
+        if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+                return -EBUSY;
+
+        /* A reprieve! */
+        netif_start_queue(netdev);
+        ++adapter->restart_queue;
+        return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgb_desc_ring *tx_ring, int size)
+{
+        if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+                return 0;
+        return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                          (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+        + 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
@@ -1430,7 +1475,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
         spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-        if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+        if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+                                        DESC_NEEDED))) {
                 netif_stop_queue(netdev);
                 spin_unlock_irqrestore(&adapter->tx_lock, flags);
                 return NETDEV_TX_BUSY;
@@ -1468,8 +1514,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
         /* Make sure there is space in the ring for the next send. */
-        if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-                netif_stop_queue(netdev);
+        ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
         spin_unlock_irqrestore(&adapter->tx_lock, flags);
 
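
Note on the stop/re-check logic: the __ixgb_maybe_stop_tx() helper added above follows the usual stop/barrier/re-check idiom for lockless (LLTX) transmit paths: stop the queue, issue a full memory barrier, then re-read the free-descriptor count so a wakeup from the clean-up path cannot be missed. The sketch below is a minimal userspace analogue of that idiom, not driver code; the names ring_unused, queue_stopped, maybe_stop() and clean() are illustrative stand-ins for IXGB_DESC_UNUSED(), the netif queue state, ixgb_maybe_stop_tx() and the descriptor clean-up side.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int ring_unused;      /* free descriptors (zero-initialized) */
static atomic_bool queue_stopped;   /* analogue of the netif queue state */

/* Producer side, mirroring __ixgb_maybe_stop_tx(): stop first, issue a
 * full barrier, then re-check in case the consumer freed space in the
 * meantime.  Returns -1 ("busy") only when there really is no room. */
static int maybe_stop(int needed)
{
        if (atomic_load(&ring_unused) >= needed)
                return 0;                              /* fast path: enough room */

        atomic_store(&queue_stopped, true);            /* netif_stop_queue() analogue */
        atomic_thread_fence(memory_order_seq_cst);     /* the open-coded smp_mb() */

        if (atomic_load(&ring_unused) < needed)
                return -1;                             /* still full: stay stopped */

        atomic_store(&queue_stopped, false);           /* "a reprieve": restart */
        return 0;
}

/* Consumer side (descriptor clean-up): publish the freed space before
 * checking the stopped flag, so one of the two sides always observes
 * the other's update and the queue cannot stall. */
static void clean(int freed)
{
        atomic_fetch_add(&ring_unused, freed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load(&queue_stopped))
                atomic_store(&queue_stopped, false);   /* netif_wake_queue() analogue */
}

int main(void)
{
        atomic_store(&ring_unused, 2);
        printf("maybe_stop(8) -> %d (queue stopped)\n", maybe_stop(8));
        clean(16);
        printf("maybe_stop(8) -> %d (room available again)\n", maybe_stop(8));
        return 0;
}

The extra "+ 1" added to DESC_NEEDED appears to reserve room for the additional descriptor created when the 4-byte TSO sentinel is split off in ixgb_tx_map(), so the re-check above never under-reserves.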