author    David S. Miller <davem@davemloft.net>  2012-09-22 15:38:25 -0400
committer David S. Miller <davem@davemloft.net>  2012-09-22 15:38:25 -0400
commit    717ecc276dadf31cd858d4e56ac71c3cc479fc19
tree      7b96dc8e447aafadc1a19c5a13f9214f41d4707f /drivers
parent    7146b2d9f11e07848050f53b71bafa37a95ae609
parent    c9f14bf3a49f86e6402a6e3476a180f2bdc8a71b
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Jeff Kirsher says:

====================
This series contains updates to igb only.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers')
 drivers/net/ethernet/intel/igb/igb.h      |   8
 drivers/net/ethernet/intel/igb/igb_main.c | 370
 2 files changed, 122 insertions(+), 256 deletions(-)
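Background note, not part of the commit: much of this series converts igb's hand-rolled dma/length bookkeeping in struct igb_tx_buffer to the DMA-unmap state helpers from <linux/dma-mapping.h>. The sketch below shows the usual pattern under that assumption; the struct and function names (example_tx_buffer, example_map, example_unmap) are invented for illustration, while the dma_* macros and calls are the real kernel API the patches adopt.

/*
 * Illustrative sketch only -- not taken from the patch.
 */
#include <linux/dma-mapping.h>

struct example_tx_buffer {
        DEFINE_DMA_UNMAP_ADDR(dma);     /* compiles away if unmap state is unneeded */
        DEFINE_DMA_UNMAP_LEN(len);
};

static int example_map(struct device *dev, struct example_tx_buffer *buf,
                       void *data, size_t size)
{
        dma_addr_t dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, dma))
                return -ENOMEM;

        /* remember address and length so the buffer can be unmapped later */
        dma_unmap_addr_set(buf, dma, dma);
        dma_unmap_len_set(buf, len, size);
        return 0;
}

static void example_unmap(struct device *dev, struct example_tx_buffer *buf)
{
        if (dma_unmap_len(buf, len)) {
                dma_unmap_single(dev, dma_unmap_addr(buf, dma),
                                 dma_unmap_len(buf, len), DMA_TO_DEVICE);
                dma_unmap_len_set(buf, len, 0);
        }
}

When CONFIG_NEED_DMA_MAP_STATE is not set, the DEFINE_DMA_UNMAP_* fields expand to nothing, which is why the stored address and length are only ever touched through the accessor macros, as in the hunks below.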
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 43c8e2914263..8aad230c0592 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -101,7 +101,6 @@ struct vf_data_storage {
         u16 pf_vlan; /* When set, guest VLAN config not allowed. */
         u16 pf_qos;
         u16 tx_rate;
-        struct pci_dev *vfdev;
 };
 
 #define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
@@ -169,8 +168,8 @@ struct igb_tx_buffer {
         unsigned int bytecount;
         u16 gso_segs;
         __be16 protocol;
-        dma_addr_t dma;
-        u32 length;
+        DEFINE_DMA_UNMAP_ADDR(dma);
+        DEFINE_DMA_UNMAP_LEN(len);
         u32 tx_flags;
 };
 
@@ -214,7 +213,6 @@ struct igb_q_vector {
         struct igb_ring_container rx, tx;
 
         struct napi_struct napi;
-        int numa_node;
 
         u16 itr_val;
         u8 set_itr;
@@ -259,7 +257,6 @@ struct igb_ring {
         };
         /* Items past this point are only used during ring alloc / free */
         dma_addr_t dma;                /* phys address of the ring */
-        int numa_node;                 /* node to alloc ring memory on */
 };
 
 enum e1000_ring_flags_t {
@@ -374,7 +371,6 @@ struct igb_adapter {
         int vf_rate_link_speed;
         u32 rss_queues;
         u32 wvbr;
-        int node;
         u32 *shadow_vfta;
 
 #ifdef CONFIG_IGB_PTP
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 246646b61a1a..60bf46534835 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -172,8 +172,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *);
 
 #ifdef CONFIG_PCI_IOV
 static int igb_vf_configure(struct igb_adapter *adapter, int vf);
-static int igb_find_enabled_vfs(struct igb_adapter *adapter);
-static int igb_check_vf_assignment(struct igb_adapter *adapter);
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter);
 #endif
 
 #ifdef CONFIG_PM
@@ -404,8 +403,8 @@ static void igb_dump(struct igb_adapter *adapter)
                 buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
                 pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
                         n, tx_ring->next_to_use, tx_ring->next_to_clean,
-                        (u64)buffer_info->dma,
-                        buffer_info->length,
+                        (u64)dma_unmap_addr(buffer_info, dma),
+                        dma_unmap_len(buffer_info, len),
                         buffer_info->next_to_watch,
                         (u64)buffer_info->time_stamp);
         }
@@ -456,8 +455,8 @@ static void igb_dump(struct igb_adapter *adapter)
                         " %04X %p %016llX %p%s\n", i,
                         le64_to_cpu(u0->a),
                         le64_to_cpu(u0->b),
-                        (u64)buffer_info->dma,
-                        buffer_info->length,
+                        (u64)dma_unmap_addr(buffer_info, dma),
+                        dma_unmap_len(buffer_info, len),
                         buffer_info->next_to_watch,
                         (u64)buffer_info->time_stamp,
                         buffer_info->skb, next_desc);
@@ -466,7 +465,8 @@ static void igb_dump(struct igb_adapter *adapter)
                                 print_hex_dump(KERN_INFO, "",
                                         DUMP_PREFIX_ADDRESS,
                                         16, 1, buffer_info->skb->data,
-                                        buffer_info->length, true);
+                                        dma_unmap_len(buffer_info, len),
+                                        true);
                         }
                 }
 
@@ -683,52 +683,29 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 {
         struct igb_ring *ring;
         int i;
-        int orig_node = adapter->node;
 
         for (i = 0; i < adapter->num_tx_queues; i++) {
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                    adapter->node);
-                if (!ring)
-                        ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                 if (!ring)
                         goto err;
                 ring->count = adapter->tx_ring_count;
                 ring->queue_index = i;
                 ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
-                ring->numa_node = adapter->node;
                 /* For 82575, context index must be unique per ring. */
                 if (adapter->hw.mac.type == e1000_82575)
                         set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
                 adapter->tx_ring[i] = ring;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
 
         for (i = 0; i < adapter->num_rx_queues; i++) {
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
-                                    adapter->node);
-                if (!ring)
-                        ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+                ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
                 if (!ring)
                         goto err;
                 ring->count = adapter->rx_ring_count;
                 ring->queue_index = i;
                 ring->dev = &adapter->pdev->dev;
                 ring->netdev = adapter->netdev;
-                ring->numa_node = adapter->node;
                 /* set flag indicating ring supports SCTP checksum offload */
                 if (adapter->hw.mac.type >= e1000_82576)
                         set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
@@ -742,16 +719,12 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 
                 adapter->rx_ring[i] = ring;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
 
         igb_cache_ring_register(adapter);
 
         return 0;
 
 err:
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
         igb_free_queues(adapter);
 
         return -ENOMEM;
@@ -1117,24 +1090,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
         struct igb_q_vector *q_vector;
         struct e1000_hw *hw = &adapter->hw;
         int v_idx;
-        int orig_node = adapter->node;
 
         for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-                if ((adapter->num_q_vectors == (adapter->num_rx_queues +
-                                                adapter->num_tx_queues)) &&
-                    (adapter->num_rx_queues == v_idx))
-                        adapter->node = orig_node;
-                if (orig_node == -1) {
-                        int cur_node = next_online_node(adapter->node);
-                        if (cur_node == MAX_NUMNODES)
-                                cur_node = first_online_node;
-                        adapter->node = cur_node;
-                }
-                q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
-                                        adapter->node);
-                if (!q_vector)
-                        q_vector = kzalloc(sizeof(struct igb_q_vector),
-                                           GFP_KERNEL);
+                q_vector = kzalloc(sizeof(struct igb_q_vector),
+                                   GFP_KERNEL);
                 if (!q_vector)
                         goto err_out;
                 q_vector->adapter = adapter;
@@ -1143,14 +1102,10 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
                 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
                 adapter->q_vector[v_idx] = q_vector;
         }
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
 
         return 0;
 
 err_out:
-        /* Restore the adapter's original node */
-        adapter->node = orig_node;
         igb_free_q_vectors(adapter);
         return -ENOMEM;
 }
@@ -2300,11 +2255,11 @@ static void __devexit igb_remove(struct pci_dev *pdev)
         /* reclaim resources allocated to VFs */
         if (adapter->vf_data) {
                 /* disable iov and allow time for transactions to clear */
-                if (!igb_check_vf_assignment(adapter)) {
+                if (igb_vfs_are_assigned(adapter)) {
+                        dev_info(&pdev->dev, "Unloading driver while VFs are assigned - VFs will not be deallocated\n");
+                } else {
                         pci_disable_sriov(pdev);
                         msleep(500);
-                } else {
-                        dev_info(&pdev->dev, "VF(s) assigned to guests!\n");
                 }
 
                 kfree(adapter->vf_data);
@@ -2344,7 +2299,7 @@ static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
 #ifdef CONFIG_PCI_IOV
         struct pci_dev *pdev = adapter->pdev;
         struct e1000_hw *hw = &adapter->hw;
-        int old_vfs = igb_find_enabled_vfs(adapter);
+        int old_vfs = pci_num_vf(adapter->pdev);
         int i;
 
         /* Virtualization features not supported on i210 family. */
@@ -2424,8 +2379,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
                                   VLAN_HLEN;
         adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
-        adapter->node = -1;
-
         spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
         switch (hw->mac.type) {
@@ -2672,13 +2625,11 @@ static int igb_close(struct net_device *netdev)
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
         struct device *dev = tx_ring->dev;
-        int orig_node = dev_to_node(dev);
         int size;
 
         size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-        tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
-        if (!tx_ring->tx_buffer_info)
-                tx_ring->tx_buffer_info = vzalloc(size);
+
+        tx_ring->tx_buffer_info = vzalloc(size);
         if (!tx_ring->tx_buffer_info)
                 goto err;
 
@@ -2686,18 +2637,10 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
         tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
         tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-        set_dev_node(dev, tx_ring->numa_node);
         tx_ring->desc = dma_alloc_coherent(dev,
                                            tx_ring->size,
                                            &tx_ring->dma,
                                            GFP_KERNEL);
-        set_dev_node(dev, orig_node);
-        if (!tx_ring->desc)
-                tx_ring->desc = dma_alloc_coherent(dev,
-                                                   tx_ring->size,
-                                                   &tx_ring->dma,
-                                                   GFP_KERNEL);
-
         if (!tx_ring->desc)
                 goto err;
 
@@ -2708,8 +2651,8 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 
 err:
         vfree(tx_ring->tx_buffer_info);
-        dev_err(dev,
-                "Unable to allocate memory for the transmit descriptor ring\n");
+        tx_ring->tx_buffer_info = NULL;
+        dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
         return -ENOMEM;
 }
 
@@ -2826,34 +2769,23 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
         struct device *dev = rx_ring->dev;
-        int orig_node = dev_to_node(dev);
-        int size, desc_len;
+        int size;
 
         size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-        rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
-        if (!rx_ring->rx_buffer_info)
-                rx_ring->rx_buffer_info = vzalloc(size);
+
+        rx_ring->rx_buffer_info = vzalloc(size);
         if (!rx_ring->rx_buffer_info)
                 goto err;
 
-        desc_len = sizeof(union e1000_adv_rx_desc);
 
         /* Round up to nearest 4K */
-        rx_ring->size = rx_ring->count * desc_len;
+        rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
         rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-        set_dev_node(dev, rx_ring->numa_node);
         rx_ring->desc = dma_alloc_coherent(dev,
                                            rx_ring->size,
                                            &rx_ring->dma,
                                            GFP_KERNEL);
-        set_dev_node(dev, orig_node);
-        if (!rx_ring->desc)
-                rx_ring->desc = dma_alloc_coherent(dev,
-                                                   rx_ring->size,
-                                                   &rx_ring->dma,
-                                                   GFP_KERNEL);
-
         if (!rx_ring->desc)
                 goto err;
 
@@ -2865,8 +2797,7 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 err:
         vfree(rx_ring->rx_buffer_info);
         rx_ring->rx_buffer_info = NULL;
-        dev_err(dev, "Unable to allocate memory for the receive descriptor"
-                " ring\n");
+        dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
         return -ENOMEM;
 }
 
@@ -2904,57 +2835,48 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
 {
         struct e1000_hw *hw = &adapter->hw;
         u32 mrqc, rxcsum;
-        u32 j, num_rx_queues, shift = 0, shift2 = 0;
-        union e1000_reta {
-                u32 dword;
-                u8  bytes[4];
-        } reta;
-        static const u8 rsshash[40] = {
-                0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
-                0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
-                0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
-                0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
+        u32 j, num_rx_queues, shift = 0;
+        static const u32 rsskey[10] = { 0xDA565A6D, 0xC20E5B25, 0x3D256741,
+                                        0xB08FA343, 0xCB2BCAD0, 0xB4307BAE,
+                                        0xA32DCB77, 0x0CF23080, 0x3BB7426A,
+                                        0xFA01ACBE };
 
         /* Fill out hash function seeds */
-        for (j = 0; j < 10; j++) {
-                u32 rsskey = rsshash[(j * 4)];
-                rsskey |= rsshash[(j * 4) + 1] << 8;
-                rsskey |= rsshash[(j * 4) + 2] << 16;
-                rsskey |= rsshash[(j * 4) + 3] << 24;
-                array_wr32(E1000_RSSRK(0), j, rsskey);
-        }
+        for (j = 0; j < 10; j++)
+                wr32(E1000_RSSRK(j), rsskey[j]);
 
         num_rx_queues = adapter->rss_queues;
 
-        if (adapter->vfs_allocated_count) {
-                /* 82575 and 82576 supports 2 RSS queues for VMDq */
-                switch (hw->mac.type) {
-                case e1000_i350:
-                case e1000_82580:
-                        num_rx_queues = 1;
-                        shift = 0;
-                        break;
-                case e1000_82576:
+        switch (hw->mac.type) {
+        case e1000_82575:
+                shift = 6;
+                break;
+        case e1000_82576:
+                /* 82576 supports 2 RSS queues for SR-IOV */
+                if (adapter->vfs_allocated_count) {
                         shift = 3;
                         num_rx_queues = 2;
-                        break;
-                case e1000_82575:
-                        shift = 2;
-                        shift2 = 6;
-                default:
-                        break;
                 }
-        } else {
-                if (hw->mac.type == e1000_82575)
-                        shift = 6;
+                break;
+        default:
+                break;
         }
 
-        for (j = 0; j < (32 * 4); j++) {
-                reta.bytes[j & 3] = (j % num_rx_queues) << shift;
-                if (shift2)
-                        reta.bytes[j & 3] |= num_rx_queues << shift2;
-                if ((j & 3) == 3)
-                        wr32(E1000_RETA(j >> 2), reta.dword);
+        /*
+         * Populate the indirection table 4 entries at a time.  To do this
+         * we are generating the results for n and n+2 and then interleaving
+         * those with the results with n+1 and n+3.
+         */
+        for (j = 0; j < 32; j++) {
+                /* first pass generates n and n+2 */
+                u32 base = ((j * 0x00040004) + 0x00020000) * num_rx_queues;
+                u32 reta = (base & 0x07800780) >> (7 - shift);
+
+                /* second pass generates n+1 and n+3 */
+                base += 0x00010001 * num_rx_queues;
+                reta |= (base & 0x07800780) << (1 + shift);
+
+                wr32(E1000_RETA(j), reta);
         }
 
         /*
@@ -3277,20 +3199,20 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
 {
         if (tx_buffer->skb) {
                 dev_kfree_skb_any(tx_buffer->skb);
-                if (tx_buffer->dma)
+                if (dma_unmap_len(tx_buffer, len))
                         dma_unmap_single(ring->dev,
-                                         tx_buffer->dma,
-                                         tx_buffer->length,
+                                         dma_unmap_addr(tx_buffer, dma),
+                                         dma_unmap_len(tx_buffer, len),
                                          DMA_TO_DEVICE);
-        } else if (tx_buffer->dma) {
+        } else if (dma_unmap_len(tx_buffer, len)) {
                 dma_unmap_page(ring->dev,
-                               tx_buffer->dma,
-                               tx_buffer->length,
+                               dma_unmap_addr(tx_buffer, dma),
+                               dma_unmap_len(tx_buffer, len),
                                DMA_TO_DEVICE);
         }
         tx_buffer->next_to_watch = NULL;
         tx_buffer->skb = NULL;
-        tx_buffer->dma = 0;
+        dma_unmap_len_set(tx_buffer, len, 0);
         /* buffer_info must be completely set up in the transmit path */
 }
 
@@ -4285,7 +4207,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                        const u8 hdr_len)
 {
         struct sk_buff *skb = first->skb;
-        struct igb_tx_buffer *tx_buffer_info;
+        struct igb_tx_buffer *tx_buffer;
         union e1000_adv_tx_desc *tx_desc;
         dma_addr_t dma;
         struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
@@ -4306,8 +4228,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                 goto dma_error;
 
         /* record length, and DMA address */
-        first->length = size;
-        first->dma = dma;
+        dma_unmap_len_set(first, len, size);
+        dma_unmap_addr_set(first, dma, dma);
         tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
         for (;;) {
@@ -4349,9 +4271,9 @@ static void igb_tx_map(struct igb_ring *tx_ring,
                 if (dma_mapping_error(tx_ring->dev, dma))
                         goto dma_error;
 
-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                tx_buffer_info->length = size;
-                tx_buffer_info->dma = dma;
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                dma_unmap_len_set(tx_buffer, len, size);
+                dma_unmap_addr_set(tx_buffer, dma, dma);
 
                 tx_desc->read.olinfo_status = 0;
                 tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -4402,9 +4324,9 @@ dma_error:
 
         /* clear dma mappings for failed tx_buffer_info map */
         for (;;) {
-                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
-                if (tx_buffer_info == first)
+                tx_buffer = &tx_ring->tx_buffer_info[i];
+                igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+                if (tx_buffer == first)
                         break;
                 if (i == 0)
                         i = tx_ring->count;
@@ -4777,7 +4699,11 @@ void igb_update_stats(struct igb_adapter *adapter,
         reg = rd32(E1000_CTRL_EXT);
         if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
                 adapter->stats.rxerrc += rd32(E1000_RXERRC);
-                adapter->stats.tncrs += rd32(E1000_TNCRS);
+
+                /* this stat has invalid values on i210/i211 */
+                if ((hw->mac.type != e1000_i210) &&
+                    (hw->mac.type != e1000_i211))
+                        adapter->stats.tncrs += rd32(E1000_TNCRS);
         }
 
         adapter->stats.tsctc += rd32(E1000_TSCTC);
@@ -5037,102 +4963,43 @@ static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
 static int igb_vf_configure(struct igb_adapter *adapter, int vf)
 {
         unsigned char mac_addr[ETH_ALEN];
-        struct pci_dev *pdev = adapter->pdev;
-        struct e1000_hw *hw = &adapter->hw;
-        struct pci_dev *pvfdev;
-        unsigned int device_id;
-        u16 thisvf_devfn;
 
         eth_random_addr(mac_addr);
         igb_set_vf_mac(adapter, vf, mac_addr);
 
-        switch (adapter->hw.mac.type) {
-        case e1000_82576:
-                device_id = IGB_82576_VF_DEV_ID;
-                /* VF Stride for 82576 is 2 */
-                thisvf_devfn = (pdev->devfn + 0x80 + (vf << 1)) |
-                        (pdev->devfn & 1);
-                break;
-        case e1000_i350:
-                device_id = IGB_I350_VF_DEV_ID;
-                /* VF Stride for I350 is 4 */
-                thisvf_devfn = (pdev->devfn + 0x80 + (vf << 2)) |
-                                (pdev->devfn & 3);
-                break;
-        default:
-                device_id = 0;
-                thisvf_devfn = 0;
-                break;
-        }
-
-        pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-        while (pvfdev) {
-                if (pvfdev->devfn == thisvf_devfn)
-                        break;
-                pvfdev = pci_get_device(hw->vendor_id,
-                                        device_id, pvfdev);
-        }
-
-        if (pvfdev)
-                adapter->vf_data[vf].vfdev = pvfdev;
-        else
-                dev_err(&pdev->dev,
-                        "Couldn't find pci dev ptr for VF %4.4x\n",
-                        thisvf_devfn);
-        return pvfdev != NULL;
+        return 0;
 }
 
-static int igb_find_enabled_vfs(struct igb_adapter *adapter)
+static bool igb_vfs_are_assigned(struct igb_adapter *adapter)
 {
-        struct e1000_hw *hw = &adapter->hw;
         struct pci_dev *pdev = adapter->pdev;
-        struct pci_dev *pvfdev;
-        u16 vf_devfn = 0;
-        u16 vf_stride;
-        unsigned int device_id;
-        int vfs_found = 0;
+        struct pci_dev *vfdev;
+        int dev_id;
 
         switch (adapter->hw.mac.type) {
         case e1000_82576:
-                device_id = IGB_82576_VF_DEV_ID;
-                /* VF Stride for 82576 is 2 */
-                vf_stride = 2;
+                dev_id = IGB_82576_VF_DEV_ID;
                 break;
         case e1000_i350:
-                device_id = IGB_I350_VF_DEV_ID;
-                /* VF Stride for I350 is 4 */
-                vf_stride = 4;
+                dev_id = IGB_I350_VF_DEV_ID;
                 break;
         default:
-                device_id = 0;
-                vf_stride = 0;
-                break;
-        }
-
-        vf_devfn = pdev->devfn + 0x80;
-        pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
-        while (pvfdev) {
-                if (pvfdev->devfn == vf_devfn &&
-                    (pvfdev->bus->number >= pdev->bus->number))
-                        vfs_found++;
-                vf_devfn += vf_stride;
-                pvfdev = pci_get_device(hw->vendor_id,
-                                        device_id, pvfdev);
+                return false;
         }
 
-        return vfs_found;
-}
-
-static int igb_check_vf_assignment(struct igb_adapter *adapter)
-{
-        int i;
-        for (i = 0; i < adapter->vfs_allocated_count; i++) {
-                if (adapter->vf_data[i].vfdev) {
-                        if (adapter->vf_data[i].vfdev->dev_flags &
-                            PCI_DEV_FLAGS_ASSIGNED)
+        /* loop through all the VFs to see if we own any that are assigned */
+        vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, NULL);
+        while (vfdev) {
+                /* if we don't own it we don't care */
+                if (vfdev->is_virtfn && vfdev->physfn == pdev) {
+                        /* if it is assigned we cannot release it */
+                        if (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                 return true;
                 }
+
+                vfdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, vfdev);
         }
+
         return false;
 }
 
@@ -5815,7 +5682,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         struct igb_adapter *adapter = q_vector->adapter;
         struct igb_ring *tx_ring = q_vector->tx.ring;
         struct igb_tx_buffer *tx_buffer;
-        union e1000_adv_tx_desc *tx_desc, *eop_desc;
+        union e1000_adv_tx_desc *tx_desc;
         unsigned int total_bytes = 0, total_packets = 0;
         unsigned int budget = q_vector->tx.work_limit;
         unsigned int i = tx_ring->next_to_clean;
@@ -5827,16 +5694,16 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         tx_desc = IGB_TX_DESC(tx_ring, i);
         i -= tx_ring->count;
 
-        for (; budget; budget--) {
-                eop_desc = tx_buffer->next_to_watch;
-
-                /* prevent any other reads prior to eop_desc */
-                rmb();
+        do {
+                union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
 
                 /* if next_to_watch is not set then there is no work pending */
                 if (!eop_desc)
                         break;
 
+                /* prevent any other reads prior to eop_desc */
+                rmb();
+
                 /* if DD is not set pending work has not been completed */
                 if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
                         break;
@@ -5850,18 +5717,19 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 
                 /* free the skb */
                 dev_kfree_skb_any(tx_buffer->skb);
-                tx_buffer->skb = NULL;
 
                 /* unmap skb header data */
                 dma_unmap_single(tx_ring->dev,
-                                 tx_buffer->dma,
-                                 tx_buffer->length,
+                                 dma_unmap_addr(tx_buffer, dma),
+                                 dma_unmap_len(tx_buffer, len),
                                  DMA_TO_DEVICE);
 
+                /* clear tx_buffer data */
+                tx_buffer->skb = NULL;
+                dma_unmap_len_set(tx_buffer, len, 0);
+
                 /* clear last DMA location and unmap remaining buffers */
                 while (tx_desc != eop_desc) {
-                        tx_buffer->dma = 0;
-
                         tx_buffer++;
                         tx_desc++;
                         i++;
@@ -5872,17 +5740,15 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         }
 
                         /* unmap any remaining paged data */
-                        if (tx_buffer->dma) {
+                        if (dma_unmap_len(tx_buffer, len)) {
                                 dma_unmap_page(tx_ring->dev,
-                                               tx_buffer->dma,
-                                               tx_buffer->length,
+                                               dma_unmap_addr(tx_buffer, dma),
+                                               dma_unmap_len(tx_buffer, len),
                                                DMA_TO_DEVICE);
+                                dma_unmap_len_set(tx_buffer, len, 0);
                         }
                 }
 
-                /* clear last DMA location */
-                tx_buffer->dma = 0;
-
                 /* move us one more past the eop_desc for start of next pkt */
                 tx_buffer++;
                 tx_desc++;
@@ -5892,7 +5758,13 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         tx_buffer = tx_ring->tx_buffer_info;
                         tx_desc = IGB_TX_DESC(tx_ring, 0);
                 }
-        }
+
+                /* issue prefetch for next Tx descriptor */
+                prefetch(tx_desc);
+
+                /* update budget accounting */
+                budget--;
+        } while (likely(budget));
 
         netdev_tx_completed_queue(txring_txq(tx_ring),
                                   total_packets, total_bytes);
@@ -5908,12 +5780,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
         if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
                 struct e1000_hw *hw = &adapter->hw;
 
-                eop_desc = tx_buffer->next_to_watch;
-
                 /* Detect a transmit hang in hardware, this serializes the
                  * check with the clearing of time_stamp and movement of i */
                 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
-                if (eop_desc &&
+                if (tx_buffer->next_to_watch &&
                     time_after(jiffies, tx_buffer->time_stamp +
                                (adapter->tx_timeout_factor * HZ)) &&
                     !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
@@ -5937,9 +5807,9 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
                         tx_ring->next_to_use,
                         tx_ring->next_to_clean,
                         tx_buffer->time_stamp,
-                        eop_desc,
+                        tx_buffer->next_to_watch,
                         jiffies,
-                        eop_desc->wb.status);
+                        tx_buffer->next_to_watch->wb.status);
                 netif_stop_subqueue(tx_ring->netdev,
                                     tx_ring->queue_index);
 