author    Alexander Duyck <alexander.h.duyck@intel.com>  2009-03-18 21:15:21 -0400
committer David S. Miller <davem@davemloft.net>          2009-03-20 04:17:24 -0400
commit    fe52eeb82b746de441ed27c54ace940efe86bc9a
tree      e10b642cc9df4f479be0ee4ba2dfd378d51c0dd5 /drivers/net/ixgb
parent    5f66f208064f083aab5e55935d0575892e033b59
ixgb: refactor tx path to use skb_dma_map/unmap
This code updates ixgb so that it can use the skb_dma_map/unmap functions to map its transmit buffers. It also updates the tx hang logic to use time_stamp instead of dma to determine whether a tx hang has been detected.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
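[Editor's note] For readers unfamiliar with the 2.6.29-era skb_dma_map()/skb_dma_unmap() helpers (net/core/skb_dma_map.c, since removed from mainline), the sketch below shows the usage pattern this commit adopts: one call maps the skb's linear head plus every page fragment and records the addresses in skb_shinfo(skb)->dma_maps[], where map[0] is the head and map[f + 1] is fragment f. This is a minimal illustrative sketch, not ixgb code; example_tx_map() is a hypothetical wrapper and the pr_info() calls stand in for writing addresses into descriptors.

/* A minimal sketch (not ixgb code) of the skb_dma_map() pattern this
 * commit adopts.  skb_dma_map()/skb_dma_unmap() lived in
 * net/core/skb_dma_map.c in this era and have since been removed. */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int example_tx_map(struct device *dev, struct sk_buff *skb)
{
	dma_addr_t *map;
	unsigned int f;

	/* One call maps the linear head and every page fragment;
	 * a nonzero return means nothing was left mapped. */
	if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
		return 0;		/* caller drops the skb */

	map = skb_shinfo(skb)->dma_maps;

	/* map[0] covers the head, so descriptors carved from it use
	 * map[0] + offset, as ixgb_tx_map() now does. */
	pr_info("head %#llx, len %u\n",
		(unsigned long long)map[0], skb_headlen(skb));

	/* map[f + 1] covers page fragment f. */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		pr_info("frag %u at %#llx\n", f,
			(unsigned long long)map[f + 1]);

	/* Teardown is symmetric: one call undoes all the mappings. */
	skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
	return 1;
}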
Diffstat (limited to 'drivers/net/ixgb')
-rw-r--r--  drivers/net/ixgb/ixgb_main.c | 70
1 file changed, 39 insertions(+), 31 deletions(-)
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e2ef16b29700..4b0ea66d7a44 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -887,19 +887,13 @@ static void
 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
                                 struct ixgb_buffer *buffer_info)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	if (buffer_info->dma)
-		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
-			       PCI_DMA_TODEVICE);
-
-	/* okay to call kfree_skb here instead of kfree_skb_any because
-	 * this is never called in interrupt context */
-	if (buffer_info->skb)
-		dev_kfree_skb(buffer_info->skb);
-
-	buffer_info->skb = NULL;
 	buffer_info->dma = 0;
+	if (buffer_info->skb) {
+		skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb,
+			      DMA_TO_DEVICE);
+		dev_kfree_skb_any(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
 	buffer_info->time_stamp = 0;
 	/* these fields must always be initialized in tx
 	 * buffer_info->length = 0;
@@ -1275,17 +1269,23 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 {
 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
 	struct ixgb_buffer *buffer_info;
-	int len = skb->len;
+	int len = skb_headlen(skb);
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int mss = skb_shinfo(skb)->gso_size;
 
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
-
-	len -= skb->data_len;
+	dma_addr_t *map;
 
 	i = tx_ring->next_to_use;
 
+	if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
+		return 0;
+	}
+
+	map = skb_shinfo(skb)->dma_maps;
+
 	while (len) {
 		buffer_info = &tx_ring->buffer_info[i];
 		size = min(len, IXGB_MAX_DATA_PER_TXD);
@@ -1297,7 +1297,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		buffer_info->length = size;
 		WARN_ON(buffer_info->dma != 0);
 		buffer_info->time_stamp = jiffies;
-		buffer_info->dma =
+		buffer_info->dma = map[0] + offset;
 			pci_map_single(adapter->pdev,
 				       skb->data + offset,
 				       size,
@@ -1307,7 +1307,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		len -= size;
 		offset += size;
 		count++;
-		if (++i == tx_ring->count) i = 0;
+		if (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
 	}
 
 	for (f = 0; f < nr_frags; f++) {
@@ -1318,6 +1322,10 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 		offset = 0;
 
 		while (len) {
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+
 			buffer_info = &tx_ring->buffer_info[i];
 			size = min(len, IXGB_MAX_DATA_PER_TXD);
 
@@ -1329,21 +1337,14 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 
 			buffer_info->length = size;
 			buffer_info->time_stamp = jiffies;
-			buffer_info->dma =
-				pci_map_page(adapter->pdev,
-					     frag->page,
-					     frag->page_offset + offset,
-					     size,
-					     PCI_DMA_TODEVICE);
+			buffer_info->dma = map[f + 1] + offset;
 			buffer_info->next_to_watch = 0;
 
 			len -= size;
 			offset += size;
 			count++;
-			if (++i == tx_ring->count) i = 0;
 		}
 	}
-	i = (i == 0) ? tx_ring->count - 1 : i - 1;
 	tx_ring->buffer_info[i].skb = skb;
 	tx_ring->buffer_info[first].next_to_watch = i;
 
@@ -1445,6 +1446,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	int vlan_id = 0;
+	int count = 0;
 	int tso;
 
 	if (test_bit(__IXGB_DOWN, &adapter->flags)) {
@@ -1479,13 +1481,19 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	else if (ixgb_tx_csum(adapter, skb))
 		tx_flags |= IXGB_TX_FLAGS_CSUM;
 
-	ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
-		      tx_flags);
+	count = ixgb_tx_map(adapter, skb, first);
 
-	netdev->trans_start = jiffies;
+	if (count) {
+		ixgb_tx_queue(adapter, count, vlan_id, tx_flags);
+		netdev->trans_start = jiffies;
+		/* Make sure there is space in the ring for the next send. */
+		ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
-	/* Make sure there is space in the ring for the next send. */
-	ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
+	} else {
+		dev_kfree_skb_any(skb);
+		adapter->tx_ring.buffer_info[first].time_stamp = 0;
+		adapter->tx_ring.next_to_use = first;
+	}
 
 	return NETDEV_TX_OK;
 }
@@ -1818,7 +1826,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
 		/* detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = false;
-		if (tx_ring->buffer_info[eop].dma &&
+		if (tx_ring->buffer_info[eop].time_stamp &&
 		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
 		    && !(IXGB_READ_REG(&adapter->hw, STATUS) &
 			 IXGB_STATUS_TXOFF)) {
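
[Editor's note] On the last hunk: once skb_dma_unmap() owns the teardown, buffer_info->dma is cleared unconditionally rather than tracking a live per-descriptor mapping, so it no longer tells the hang detector whether the eop descriptor is in flight. time_stamp does: it is set in ixgb_tx_map() when the descriptor is filled and zeroed on unmap (and on the new xmit error path). A condensed, hypothetical restatement of the reworked test; the variable names match the driver, but example_tx_hung() itself is not an ixgb function:

/* Hypothetical condensation of the hang check in ixgb_clean_tx_irq(). */
static bool example_tx_hung(struct ixgb_adapter *adapter, unsigned int eop)
{
	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;

	/* in flight, stale for over a second, and not paused by
	 * link flow control (TXOFF) */
	return tx_ring->buffer_info[eop].time_stamp &&
	       time_after(jiffies,
			  tx_ring->buffer_info[eop].time_stamp + HZ) &&
	       !(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF);
}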