-rw-r--r--  drivers/net/ixgbe/ixgbe.h          |  11
-rw-r--r--  drivers/net/ixgbe/ixgbe_ethtool.c  |  32
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c     | 157
3 files changed, 93 insertions(+), 107 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index c993fc3ab8a5..70ccab074658 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -148,6 +148,7 @@ struct ixgbe_queue_stats {
 
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
+	struct device *dev;		/* device for DMA mapping */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
@@ -454,10 +455,10 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
@@ -467,7 +468,7 @@ extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
 					 struct net_device *,
 					 struct ixgbe_adapter *,
 					 struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
 					     struct ixgbe_tx_buffer *);
 extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 				   struct ixgbe_ring *rx_ring,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 81fa1ac1c9ba..cc7804962b2e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -900,13 +900,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter,
-						       &temp_tx_ring[i]);
+			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(adapter,
-								&temp_tx_ring[i]);
+					ixgbe_free_tx_resources(&temp_tx_ring[i]);
 				}
 				goto clear_reset;
 			}
@@ -925,13 +923,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter,
-						       &temp_rx_ring[i]);
+			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_rx_resources(adapter,
-								&temp_rx_ring[i]);
+					ixgbe_free_rx_resources(&temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
@@ -946,8 +942,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
 			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter,
-							adapter->tx_ring[i]);
+				ixgbe_free_tx_resources(adapter->tx_ring[i]);
 				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -957,8 +952,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
 			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter,
-							adapter->rx_ring[i]);
+				ixgbe_free_rx_resources(adapter->rx_ring[i]);
 				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -1463,8 +1457,8 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 
 	ixgbe_reset(adapter);
 
-	ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
-	ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+	ixgbe_free_tx_resources(&adapter->test_tx_ring);
+	ixgbe_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1478,10 +1472,11 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	/* Setup Tx descriptor ring and Tx buffers */
 	tx_ring->count = IXGBE_DEFAULT_TXD;
 	tx_ring->queue_index = 0;
+	tx_ring->dev = &adapter->pdev->dev;
 	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
 	tx_ring->numa_node = adapter->node;
 
-	err = ixgbe_setup_tx_resources(adapter, tx_ring);
+	err = ixgbe_setup_tx_resources(tx_ring);
 	if (err)
 		return 1;
 
@@ -1496,11 +1491,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 	/* Setup Rx Descriptor ring and Rx buffers */
 	rx_ring->count = IXGBE_DEFAULT_RXD;
 	rx_ring->queue_index = 0;
+	rx_ring->dev = &adapter->pdev->dev;
 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
 	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
 	rx_ring->numa_node = adapter->node;
 
-	err = ixgbe_setup_rx_resources(adapter, rx_ring);
+	err = ixgbe_setup_rx_resources(rx_ring);
 	if (err) {
 		ret_val = 4;
 		goto err_nomem;
@@ -1622,7 +1618,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
 		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(&adapter->pdev->dev,
+		dma_unmap_single(rx_ring->dev,
 				 rx_buffer_info->dma,
 				 bufsz,
 				 DMA_FROM_DEVICE);
@@ -1634,7 +1630,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
 
 		/* unmap buffer on Tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment Rx/Tx next to clean counters */
 		rx_ntc++;
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8f2afaa35dd9..be76dd9b94a9 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -600,18 +600,17 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 	}
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-				      struct ixgbe_tx_buffer
-				      *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+				      struct ixgbe_tx_buffer *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(&adapter->pdev->dev,
+			dma_unmap_page(tx_ring->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
 				       DMA_TO_DEVICE);
 		else
-			dma_unmap_single(&adapter->pdev->dev,
+			dma_unmap_single(tx_ring->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
 					 DMA_TO_DEVICE);
@@ -764,7 +763,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 			total_packets += tx_buffer_info->gso_segs;
 		}
 
-		ixgbe_unmap_and_free_tx_resource(adapter,
+		ixgbe_unmap_and_free_tx_resource(tx_ring,
 						 tx_buffer_info);
 	}
 
@@ -1011,7 +1010,6 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			    struct ixgbe_ring *rx_ring,
 			    u16 cleaned_count)
 {
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
 	struct sk_buff *skb;
@@ -1035,11 +1033,11 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 		}
 
 		if (!bi->dma) {
-			bi->dma = dma_map_single(&pdev->dev,
+			bi->dma = dma_map_single(rx_ring->dev,
 						 skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
-			if (dma_mapping_error(&pdev->dev, bi->dma)) {
+			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 				adapter->alloc_rx_buff_failed++;
 				bi->dma = 0;
 				goto no_buffers;
@@ -1058,12 +1056,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
 			if (!bi->page_dma) {
 				/* use a half page if we're re-using */
 				bi->page_offset ^= PAGE_SIZE / 2;
-				bi->page_dma = dma_map_page(&pdev->dev,
+				bi->page_dma = dma_map_page(rx_ring->dev,
 							    bi->page,
 							    bi->page_offset,
 							    PAGE_SIZE / 2,
 							    DMA_FROM_DEVICE);
-				if (dma_mapping_error(&pdev->dev,
+				if (dma_mapping_error(rx_ring->dev,
 						      bi->page_dma)) {
 					adapter->alloc_rx_page_failed++;
 					bi->page_dma = 0;
@@ -1151,7 +1149,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       int *work_done, int work_to_do)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
@@ -1208,7 +1205,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				IXGBE_RSC_CB(skb)->delay_unmap = true;
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 			} else {
-				dma_unmap_single(&pdev->dev,
+				dma_unmap_single(rx_ring->dev,
 						 rx_buffer_info->dma,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
@@ -1218,8 +1215,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		}
 
 		if (upper_len) {
-			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
+			dma_unmap_page(rx_ring->dev,
+				       rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
@@ -1262,7 +1261,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 						       &(rx_ring->rsc_count));
 			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
-					dma_unmap_single(&pdev->dev,
+					dma_unmap_single(rx_ring->dev,
 							 IXGBE_RSC_CB(skb)->dma,
 							 rx_ring->rx_buf_len,
 							 DMA_FROM_DEVICE);
@@ -3665,15 +3664,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = rx_ring->dev;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	/* ring already cleared, nothing to do */
 	if (!rx_ring->rx_buffer_info)
@@ -3685,7 +3682,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
 					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
@@ -3696,7 +3693,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			do {
 				struct sk_buff *this = skb;
 				if (IXGBE_RSC_CB(this)->delay_unmap) {
-					dma_unmap_single(&pdev->dev,
+					dma_unmap_single(dev,
 							 IXGBE_RSC_CB(this)->dma,
 							 rx_ring->rx_buf_len,
 							 DMA_FROM_DEVICE);
@@ -3710,7 +3707,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 		if (!rx_buffer_info->page)
 			continue;
 		if (rx_buffer_info->page_dma) {
-			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			dma_unmap_page(dev, rx_buffer_info->page_dma,
 				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 		}
@@ -3731,15 +3728,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
 /**
  * ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	/* ring already cleared, nothing to do */
 	if (!tx_ring->tx_buffer_info)
@@ -3748,7 +3743,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 	/* Free all the Tx ring sk_buffs */
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3770,7 +3765,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3782,7 +3777,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -4440,6 +4435,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
 	int i;
+	int rx_count;
 	int orig_node = adapter->node;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -4458,6 +4454,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 			goto err_tx_ring_allocation;
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
+		ring->dev = &adapter->pdev->dev;
 		ring->numa_node = adapter->node;
 
 		adapter->tx_ring[i] = ring;
@@ -4466,6 +4463,7 @@
 	/* Restore the adapter's original node */
 	adapter->node = orig_node;
 
+	rx_count = adapter->rx_ring_count;
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		struct ixgbe_ring *ring = adapter->rx_ring[i];
 		if (orig_node == -1) {
@@ -4480,8 +4478,9 @@
 		ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
 		if (!ring)
 			goto err_rx_ring_allocation;
-		ring->count = adapter->rx_ring_count;
+		ring->count = rx_count;
 		ring->queue_index = i;
+		ring->dev = &adapter->pdev->dev;
 		ring->numa_node = adapter->node;
 
 		adapter->rx_ring[i] = ring;
@@ -4938,15 +4937,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring: tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = tx_ring->dev;
 	int size;
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4961,7 +4958,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
 					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
@@ -4974,7 +4971,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 err:
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
-	e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -4993,7 +4990,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5005,48 +5002,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring: rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = rx_ring->dev;
 	int size;
 
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+	rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
 	if (!rx_ring->rx_buffer_info)
 		rx_ring->rx_buffer_info = vmalloc(size);
-	if (!rx_ring->rx_buffer_info) {
-		e_err(probe, "vmalloc allocation failed for the Rx "
-		      "descriptor ring\n");
-		goto alloc_failed;
-	}
+	if (!rx_ring->rx_buffer_info)
+		goto err;
 	memset(rx_ring->rx_buffer_info, 0, size);
 
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 
-	if (!rx_ring->desc) {
-		e_err(probe, "Memory allocation failed for the Rx "
-		      "descriptor ring\n");
-		vfree(rx_ring->rx_buffer_info);
-		goto alloc_failed;
-	}
+	if (!rx_ring->desc)
+		goto err;
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
 	return 0;
-
-alloc_failed:
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -5060,13 +5050,12 @@ alloc_failed:
  *
  * Return 0 on success, negative on failure
  **/
-
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5078,23 +5067,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbe_clean_tx_ring(adapter, tx_ring);
+	ixgbe_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-			  tx_ring->dma);
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -5111,28 +5100,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		if (adapter->tx_ring[i]->desc)
-			ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+			ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
 * Free all receive software resources
  **/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbe_clean_rx_ring(adapter, rx_ring);
+	ixgbe_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-			  rx_ring->dma);
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -5149,7 +5138,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		if (adapter->rx_ring[i]->desc)
-			ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+			ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5985,7 +5974,7 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			struct sk_buff *skb, u32 tx_flags,
 			unsigned int first, const u8 hdr_len)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = tx_ring->dev;
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
@@ -6008,10 +5997,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(&pdev->dev,
+		tx_buffer_info->dma = dma_map_single(dev,
 						     skb->data + offset,
 						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+		if (dma_mapping_error(dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -6044,12 +6033,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+			tx_buffer_info->dma = dma_map_page(dev,
 							   frag->page,
 							   offset, size,
 							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+			if (dma_mapping_error(dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -6097,7 +6086,7 @@ dma_error:
 		i += tx_ring->count;
 		i--;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	return 0;
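
The net effect of the patch is that a ring carries its own `struct device *dev`, so the setup, free, clean, and DMA-unmap paths no longer need a `struct ixgbe_adapter *` argument. A minimal sketch of the resulting call pattern is below; the wrapper function `example_alloc_test_ring()` and its error handling are illustrative only and not part of the patch, but the fields and functions it uses all appear in the diff above.

static int example_alloc_test_ring(struct ixgbe_adapter *adapter,
				   struct ixgbe_ring *ring)
{
	int err;

	ring->count = IXGBE_DEFAULT_TXD;
	ring->dev = &adapter->pdev->dev;	/* ring now carries the DMA device */
	ring->numa_node = adapter->node;

	/* no adapter argument needed any more */
	err = ixgbe_setup_tx_resources(ring);
	if (err)
		return err;

	/* ... use the ring ... */

	/* free path also takes only the ring; the new !desc check makes
	 * it safe to call even if descriptor memory was never allocated */
	ixgbe_free_tx_resources(ring);
	return 0;
}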