Diffstat (limited to 'drivers')
 drivers/net/ethernet/intel/igb/e1000_defines.h |   3
 drivers/net/ethernet/intel/igb/igb.h           |  53
 drivers/net/ethernet/intel/igb/igb_ethtool.c   |  14
 drivers/net/ethernet/intel/igb/igb_main.c      | 675
 4 files changed, 411 insertions, 334 deletions
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 7b8ddd830f19..68558be6f9e7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -409,6 +409,9 @@
 #define E1000_ICS_DRSTA E1000_ICR_DRSTA /* Device Reset Aserted */
 
 /* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR 0x80000000 /* Don't reset counters on write */
+
 
 /* Transmit Descriptor Control */
 /* Enable the counting of descriptors still to be processed. */
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 77793a9debcc..4e665a9b4763 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -42,8 +42,11 @@
 
 struct igb_adapter;
 
-/* ((1000000000ns / (6000ints/s * 1024ns)) << 2 = 648 */
-#define IGB_START_ITR 648
+/* Interrupt defines */
+#define IGB_START_ITR 648 /* ~6000 ints/sec */
+#define IGB_4K_ITR 980
+#define IGB_20K_ITR 196
+#define IGB_70K_ITR 56
 
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
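
(For reference, not part of the patch: the new constants are still consistent with the formula in the comment being removed above, value = (10^9 ns / (rate * 1024 ns)) << 2, i.e. roughly 3906250 / rate:

	 4000 ints/s  ->  ~977   (IGB_4K_ITR   = 980)
	 6000 ints/s  ->  ~651   (IGB_START_ITR = 648)
	20000 ints/s  ->  ~195   (IGB_20K_ITR  = 196)
	70000 ints/s  ->  ~56    (IGB_70K_ITR  = 56)

so each named value is just the old magic number for that interrupt rate, rounded to a nearby multiple of 4.)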
@@ -146,6 +149,7 @@ struct igb_tx_buffer {
 	struct sk_buff *skb;
 	unsigned int bytecount;
 	u16 gso_segs;
+	__be16 protocol;
 	dma_addr_t dma;
 	u32 length;
 	u32 tx_flags;
@@ -174,15 +178,24 @@ struct igb_rx_queue_stats {
 	u64 alloc_failed;
 };
 
+struct igb_ring_container {
+	struct igb_ring *ring;		/* pointer to linked list of rings */
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
+
 struct igb_q_vector {
 	struct igb_adapter *adapter;	/* backlink */
-	struct igb_ring *rx_ring;
-	struct igb_ring *tx_ring;
-	struct napi_struct napi;
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
 
-	u32 eims_value;
-	u16 cpu;
-	u16 tx_work_limit;
+	struct igb_ring_container rx, tx;
+
+	struct napi_struct napi;
+	int numa_node;
 
 	u16 itr_val;
 	u8 set_itr;
@@ -212,16 +225,12 @@ struct igb_ring {
 	u16 next_to_clean ____cacheline_aligned_in_smp;
 	u16 next_to_use;
 
-	unsigned int total_bytes;
-	unsigned int total_packets;
-
 	union {
 		/* TX */
 		struct {
 			struct igb_tx_queue_stats tx_stats;
 			struct u64_stats_sync tx_syncp;
 			struct u64_stats_sync tx_syncp2;
-			bool detect_tx_hung;
 		};
 		/* RX */
 		struct {
@@ -231,12 +240,14 @@ struct igb_ring {
 	};
 	/* Items past this point are only used during ring alloc / free */
 	dma_addr_t dma;			/* phys address of the ring */
+	int numa_node;			/* node to alloc ring memory on */
 };
 
-#define IGB_RING_FLAG_RX_CSUM      0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
-
-#define IGB_RING_FLAG_TX_CTX_IDX   0x00000001 /* HW requires context index */
+enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_SCTP_CSUM,
+	IGB_RING_FLAG_TX_CTX_IDX,
+	IGB_RING_FLAG_TX_DETECT_HANG
+};
 
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
 
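
Note that the flag values change meaning here: they are now bit numbers for the atomic bitops API rather than pre-shifted masks, and IGB_RING_FLAG_RX_CSUM disappears entirely (Rx checksum is keyed off netdev->features from now on, see the igb_set_features hunk in igb_main.c below). Illustrative usage, mirroring the igb_main.c changes later in this diff:

	set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;
	set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	/* presumably cleared again with clear_bit() in the Tx clean path,
	 * which is not part of this diff */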
@@ -247,6 +258,13 @@ struct igb_ring {
 #define IGB_TX_CTXTDESC(R, i)	\
 	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
 
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+				      const u32 stat_err_bits)
+{
+	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
 /* igb_desc_unused - calculate if we have unused descriptors */
 static inline int igb_desc_unused(struct igb_ring *ring)
 {
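
The helper compares in wire (little-endian) byte order: the constant is converted once with cpu_to_le32() instead of byte-swapping every descriptor with le32_to_cpu(), which is why the ethtool loopback clean loop below can drop its local 'staterr' variable. Typical call, taken from the igb_ethtool.c hunk further down:

	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
		/* ... consume the buffer ... */
		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
	}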
@@ -340,6 +358,7 @@ struct igb_adapter {
 	int vf_rate_link_speed;
 	u32 rss_queues;
 	u32 wvbr;
+	int node;
 };
 
 #define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 174540f262d7..43873eba2f63 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1577,16 +1577,14 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *rx_buffer_info;
 	struct igb_tx_buffer *tx_buffer_info;
-	int rx_ntc, tx_ntc, count = 0;
-	u32 staterr;
+	u16 rx_ntc, tx_ntc, count = 0;
 
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
 	tx_ntc = tx_ring->next_to_clean;
 	rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-	while (staterr & E1000_RXD_STAT_DD) {
+	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
 		/* check rx buffer */
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
@@ -1615,7 +1613,6 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 
 		/* fetch next descriptor */
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
-		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
 	/* re-map buffers to ring, store next to clean values */
@@ -1630,7 +1627,8 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	int i, j, lc, good_cnt, ret_val = 0;
+	u16 i, j, lc, good_cnt;
+	int ret_val = 0;
 	unsigned int size = IGB_RX_HDR_LEN;
 	netdev_tx_t tx_ret_val;
 	struct sk_buff *skb;
@@ -2008,8 +2006,8 @@ static int igb_set_coalesce(struct net_device *netdev,
 
 	for (i = 0; i < adapter->num_q_vectors; i++) {
 		struct igb_q_vector *q_vector = adapter->q_vector[i];
-		q_vector->tx_work_limit = adapter->tx_work_limit;
-		if (q_vector->rx_ring)
+		q_vector->tx.work_limit = adapter->tx_work_limit;
+		if (q_vector->rx.ring)
 			q_vector->itr_val = adapter->rx_itr_setting;
 		else
 			q_vector->itr_val = adapter->tx_itr_setting;
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 862dd7c0cc70..10670f944115 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -338,14 +338,13 @@ static void igb_dump(struct igb_adapter *adapter)
 	struct net_device *netdev = adapter->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_reg_info *reginfo;
-	int n = 0;
 	struct igb_ring *tx_ring;
 	union e1000_adv_tx_desc *tx_desc;
 	struct my_u0 { u64 a; u64 b; } *u0;
 	struct igb_ring *rx_ring;
 	union e1000_adv_rx_desc *rx_desc;
 	u32 staterr;
-	int i = 0;
+	u16 i, n;
 
 	if (!netif_msg_hw(adapter))
 		return;
@@ -687,60 +686,111 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 {
 	struct igb_ring *ring;
 	int i;
+	int orig_node = adapter->node;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+		if (orig_node == -1) {
+			int cur_node = next_online_node(adapter->node);
+			if (cur_node == MAX_NUMNODES)
+				cur_node = first_online_node;
+			adapter->node = cur_node;
+		}
+		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
+				    adapter->node);
+		if (!ring)
+			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
 		if (!ring)
 			goto err;
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
 		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
+		ring->numa_node = adapter->node;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
-			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
 		adapter->tx_ring[i] = ring;
 	}
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+		if (orig_node == -1) {
+			int cur_node = next_online_node(adapter->node);
+			if (cur_node == MAX_NUMNODES)
+				cur_node = first_online_node;
+			adapter->node = cur_node;
+		}
+		ring = kzalloc_node(sizeof(struct igb_ring), GFP_KERNEL,
+				    adapter->node);
+		if (!ring)
+			ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
 		if (!ring)
 			goto err;
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
-		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+		ring->numa_node = adapter->node;
 		/* set flag indicating ring supports SCTP checksum offload */
 		if (adapter->hw.mac.type >= e1000_82576)
-			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 		adapter->rx_ring[i] = ring;
 	}
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
 
 	igb_cache_ring_register(adapter);
 
 	return 0;
 
 err:
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
 	igb_free_queues(adapter);
 
 	return -ENOMEM;
 }
 
+/**
+ * igb_write_ivar - configure ivar for given MSI-X vector
+ * @hw: pointer to the HW structure
+ * @msix_vector: vector number we are allocating to a given ring
+ * @index: row index of IVAR register to write within IVAR table
+ * @offset: column offset of in IVAR, should be multiple of 8
+ *
+ * This function is intended to handle the writing of the IVAR register
+ * for adapters 82576 and newer.  The IVAR table consists of 2 columns,
+ * each containing an cause allocation for an Rx and Tx ring, and a
+ * variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+			   int index, int offset)
+{
+	u32 ivar = array_rd32(E1000_IVAR0, index);
+
+	/* clear any bits that are currently set */
+	ivar &= ~((u32)0xFF << offset);
+
+	/* write vector and valid bit */
+	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+	array_wr32(E1000_IVAR0, index, ivar);
+}
+
 #define IGB_N0_QUEUE -1
 static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 {
-	u32 msixbm = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
-	u32 ivar, index;
 	int rx_queue = IGB_N0_QUEUE;
 	int tx_queue = IGB_N0_QUEUE;
+	u32 msixbm = 0;
 
-	if (q_vector->rx_ring)
-		rx_queue = q_vector->rx_ring->reg_idx;
-	if (q_vector->tx_ring)
-		tx_queue = q_vector->tx_ring->reg_idx;
+	if (q_vector->rx.ring)
+		rx_queue = q_vector->rx.ring->reg_idx;
+	if (q_vector->tx.ring)
+		tx_queue = q_vector->tx.ring->reg_idx;
 
 	switch (hw->mac.type) {
 	case e1000_82575:
@@ -758,72 +808,39 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
 		q_vector->eims_value = msixbm;
 		break;
 	case e1000_82576:
-		/* 82576 uses a table-based method for assigning vectors.
-		   Each queue has a single entry in the table to which we write
-		   a vector number along with a "valid" bit.  Sadly, the layout
-		   of the table is somewhat counterintuitive. */
-		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue & 0x7);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue < 8) {
-				/* vector goes into low byte of register */
-				ivar = ivar & 0xFFFFFF00;
-				ivar |= msix_vector | E1000_IVAR_VALID;
-			} else {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue & 0x7);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue < 8) {
-				/* vector goes into second byte of register */
-				ivar = ivar & 0xFFFF00FF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
-			} else {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
+		/*
+		 * 82576 uses a table that essentially consists of 2 columns
+		 * with 8 rows.  The ordering is column-major so we use the
+		 * lower 3 bits as the row index, and the 4th bit as the
+		 * column offset.
+		 */
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue & 0x7,
+				       (rx_queue & 0x8) << 1);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue & 0x7,
+				       ((tx_queue & 0x8) << 1) + 8);
 		q_vector->eims_value = 1 << msix_vector;
 		break;
 	case e1000_82580:
 	case e1000_i350:
-		/* 82580 uses the same table-based approach as 82576 but has fewer
-		   entries as a result we carry over for queues greater than 4. */
-		if (rx_queue > IGB_N0_QUEUE) {
-			index = (rx_queue >> 1);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (rx_queue & 0x1) {
-				/* vector goes into third byte of register */
-				ivar = ivar & 0xFF00FFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
-			} else {
-				/* vector goes into low byte of register */
-				ivar = ivar & 0xFFFFFF00;
-				ivar |= msix_vector | E1000_IVAR_VALID;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
-		if (tx_queue > IGB_N0_QUEUE) {
-			index = (tx_queue >> 1);
-			ivar = array_rd32(E1000_IVAR0, index);
-			if (tx_queue & 0x1) {
-				/* vector goes into high byte of register */
-				ivar = ivar & 0x00FFFFFF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
-			} else {
-				/* vector goes into second byte of register */
-				ivar = ivar & 0xFFFF00FF;
-				ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
-			}
-			array_wr32(E1000_IVAR0, index, ivar);
-		}
+		/*
+		 * On 82580 and newer adapters the scheme is similar to 82576
+		 * however instead of ordering column-major we have things
+		 * ordered row-major.  So we traverse the table by using
+		 * bit 0 as the column offset, and the remaining bits as the
+		 * row index.
+		 */
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue >> 1,
+				       (rx_queue & 0x1) << 4);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue >> 1,
+				       ((tx_queue & 0x1) << 4) + 8);
 		q_vector->eims_value = 1 << msix_vector;
 		break;
 	default:
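
A worked example of the index/offset math above (my reading of the code, not taken from the patch itself):

	/* 82576, column-major: 2 columns of 8 rows */
	rx_queue = 9:  index = 9 & 0x7 = 1,  offset = (9 & 0x8) << 1 = 16        -> bits 23:16 of IVAR[1]
	tx_queue = 3:  index = 3 & 0x7 = 3,  offset = ((3 & 0x8) << 1) + 8 = 8   -> bits 15:8 of IVAR[3]

	/* 82580/i350, row-major: bit 0 picks the column, the remaining bits the row */
	rx_queue = 5:  index = 5 >> 1 = 2,   offset = (5 & 0x1) << 4 = 16
	tx_queue = 4:  index = 4 >> 1 = 2,   offset = ((4 & 0x1) << 4) + 8 = 8

In each case igb_write_ivar() clears the 8-bit field at 'offset' and writes (msix_vector | E1000_IVAR_VALID) into it.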
@@ -923,15 +940,15 @@ static int igb_request_msix(struct igb_adapter *adapter)
 
 		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
 
-		if (q_vector->rx_ring && q_vector->tx_ring)
+		if (q_vector->rx.ring && q_vector->tx.ring)
 			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
-				q_vector->rx_ring->queue_index);
-		else if (q_vector->tx_ring)
+				q_vector->rx.ring->queue_index);
+		else if (q_vector->tx.ring)
 			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
-				q_vector->tx_ring->queue_index);
-		else if (q_vector->rx_ring)
+				q_vector->tx.ring->queue_index);
+		else if (q_vector->rx.ring)
 			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
-				q_vector->rx_ring->queue_index);
+				q_vector->rx.ring->queue_index);
 		else
 			sprintf(q_vector->name, "%s-unused", netdev->name);
 
@@ -1087,9 +1104,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 	struct igb_q_vector *q_vector;
 	struct e1000_hw *hw = &adapter->hw;
 	int v_idx;
+	int orig_node = adapter->node;
 
 	for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
-		q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
+		if ((adapter->num_q_vectors == (adapter->num_rx_queues +
+						adapter->num_tx_queues)) &&
+		    (adapter->num_rx_queues == v_idx))
+			adapter->node = orig_node;
+		if (orig_node == -1) {
+			int cur_node = next_online_node(adapter->node);
+			if (cur_node == MAX_NUMNODES)
+				cur_node = first_online_node;
+			adapter->node = cur_node;
+		}
+		q_vector = kzalloc_node(sizeof(struct igb_q_vector), GFP_KERNEL,
+					adapter->node);
+		if (!q_vector)
+			q_vector = kzalloc(sizeof(struct igb_q_vector),
+					   GFP_KERNEL);
 		if (!q_vector)
 			goto err_out;
 		q_vector->adapter = adapter;
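
The same allocation pattern appears in igb_alloc_queues() above and here: when no node is pinned (adapter->node == -1, set in igb_sw_init() below), successive allocations are spread round-robin across the online NUMA nodes, with a plain kzalloc() as the fallback if the node-local attempt fails. Condensed sketch (generic 'obj'/'size' in place of ring / q_vector, comments mine):

	if (orig_node == -1) {				/* -1 == no preferred node */
		int cur_node = next_online_node(adapter->node);
		if (cur_node == MAX_NUMNODES)		/* walked off the end, wrap around */
			cur_node = first_online_node;
		adapter->node = cur_node;		/* next object goes one node further */
	}
	obj = kzalloc_node(size, GFP_KERNEL, adapter->node);
	if (!obj)					/* node-local allocation failed */
		obj = kzalloc(size, GFP_KERNEL);	/* fall back to any node */

adapter->node is restored to orig_node once the loop (or the error path) finishes, so the round-robin state does not leak out of the function. The extra check at the top of this loop restarts the rotation when the vectors switch from Rx to Tx, which appears intended to land the Tx vector for queue i on the same node as the Rx vector for queue i.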
@@ -1098,9 +1130,14 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
 		netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
 		adapter->q_vector[v_idx] = q_vector;
 	}
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
+
 	return 0;
 
 err_out:
+	/* Restore the adapter's original node */
+	adapter->node = orig_node;
 	igb_free_q_vectors(adapter);
 	return -ENOMEM;
 }
@@ -1110,8 +1147,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
 {
 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
-	q_vector->rx_ring = adapter->rx_ring[ring_idx];
-	q_vector->rx_ring->q_vector = q_vector;
+	q_vector->rx.ring = adapter->rx_ring[ring_idx];
+	q_vector->rx.ring->q_vector = q_vector;
+	q_vector->rx.count++;
 	q_vector->itr_val = adapter->rx_itr_setting;
 	if (q_vector->itr_val && q_vector->itr_val <= 3)
 		q_vector->itr_val = IGB_START_ITR;
@@ -1122,10 +1160,11 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
 {
 	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
 
-	q_vector->tx_ring = adapter->tx_ring[ring_idx];
-	q_vector->tx_ring->q_vector = q_vector;
+	q_vector->tx.ring = adapter->tx_ring[ring_idx];
+	q_vector->tx.ring->q_vector = q_vector;
+	q_vector->tx.count++;
 	q_vector->itr_val = adapter->tx_itr_setting;
-	q_vector->tx_work_limit = adapter->tx_work_limit;
+	q_vector->tx.work_limit = adapter->tx_work_limit;
 	if (q_vector->itr_val && q_vector->itr_val <= 3)
 		q_vector->itr_val = IGB_START_ITR;
 }
@@ -1770,17 +1809,8 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
 
 static int igb_set_features(struct net_device *netdev, u32 features)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	int i;
 	u32 changed = netdev->features ^ features;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		if (features & NETIF_F_RXCSUM)
-			adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
-		else
-			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
-	}
-
 	if (changed & NETIF_F_HW_VLAN_RX)
 		igb_vlan_mode(netdev, features);
 
@@ -1948,23 +1978,32 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		dev_info(&pdev->dev,
 			"PHY reset is blocked due to SOL/IDER session.\n");
 
-	netdev->hw_features = NETIF_F_SG |
-			      NETIF_F_IP_CSUM |
-			      NETIF_F_IPV6_CSUM |
-			      NETIF_F_TSO |
-			      NETIF_F_TSO6 |
-			      NETIF_F_RXCSUM |
-			      NETIF_F_HW_VLAN_RX;
-
-	netdev->features = netdev->hw_features |
-			   NETIF_F_HW_VLAN_TX |
-			   NETIF_F_HW_VLAN_FILTER;
-
-	netdev->vlan_features |= NETIF_F_TSO;
-	netdev->vlan_features |= NETIF_F_TSO6;
-	netdev->vlan_features |= NETIF_F_IP_CSUM;
-	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
-	netdev->vlan_features |= NETIF_F_SG;
+	/*
+	 * features is initialized to 0 in allocation, it might have bits
+	 * set by igb_sw_init so we should use an or instead of an
+	 * assignment.
+	 */
+	netdev->features |= NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXHASH |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_HW_VLAN_RX |
+			    NETIF_F_HW_VLAN_TX;
+
+	/* copy netdev features into list of user selectable features */
+	netdev->hw_features |= netdev->features;
+
+	/* set this bit last since it cannot be part of hw_features */
+	netdev->features |= NETIF_F_HW_VLAN_FILTER;
+
+	netdev->vlan_features |= NETIF_F_TSO |
+				 NETIF_F_TSO6 |
+				 NETIF_F_IP_CSUM |
+				 NETIF_F_IPV6_CSUM |
+				 NETIF_F_SG;
 
 	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -2082,8 +2121,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_register;
 
-	igb_vlan_mode(netdev, netdev->features);
-
 	/* carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
 
@@ -2409,6 +2446,8 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 				  VLAN_HLEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	adapter->node = -1;
+
 	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	switch (hw->mac.type) {
@@ -2579,10 +2618,13 @@ static int igb_close(struct net_device *netdev)
 int igb_setup_tx_resources(struct igb_ring *tx_ring)
 {
 	struct device *dev = tx_ring->dev;
+	int orig_node = dev_to_node(dev);
 	int size;
 
 	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vzalloc(size);
+	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
+	if (!tx_ring->tx_buffer_info)
+		tx_ring->tx_buffer_info = vzalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
 
@@ -2590,16 +2632,24 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
+	set_dev_node(dev, tx_ring->numa_node);
 	tx_ring->desc = dma_alloc_coherent(dev,
 					   tx_ring->size,
 					   &tx_ring->dma,
 					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!tx_ring->desc)
+		tx_ring->desc = dma_alloc_coherent(dev,
+						   tx_ring->size,
+						   &tx_ring->dma,
+						   GFP_KERNEL);
 
 	if (!tx_ring->desc)
 		goto err;
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
+
 	return 0;
 
 err:
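
dma_alloc_coherent() has no node parameter, so the ring setup gets node-local descriptor memory indirectly: it temporarily re-points the struct device at the ring's node with set_dev_node(), allocates, then restores the original node before the node-agnostic retry. A minimal sketch of the pattern used here and in igb_setup_rx_resources() below (generic names, the real code uses tx_ring->desc and friends):

	int orig_node = dev_to_node(dev);

	set_dev_node(dev, ring->numa_node);	/* bias the coherent allocation */
	desc = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);
	set_dev_node(dev, orig_node);		/* restore before anything else sees it */
	if (!desc)				/* node-local attempt failed, try anywhere */
		desc = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL);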
@@ -2722,10 +2772,13 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 int igb_setup_rx_resources(struct igb_ring *rx_ring)
 {
 	struct device *dev = rx_ring->dev;
+	int orig_node = dev_to_node(dev);
 	int size, desc_len;
 
 	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vzalloc(size);
+	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
+	if (!rx_ring->rx_buffer_info)
+		rx_ring->rx_buffer_info = vzalloc(size);
 	if (!rx_ring->rx_buffer_info)
 		goto err;
 
@@ -2735,10 +2788,17 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
 	rx_ring->size = rx_ring->count * desc_len;
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
+	set_dev_node(dev, rx_ring->numa_node);
 	rx_ring->desc = dma_alloc_coherent(dev,
 					   rx_ring->size,
 					   &rx_ring->dma,
 					   GFP_KERNEL);
+	set_dev_node(dev, orig_node);
+	if (!rx_ring->desc)
+		rx_ring->desc = dma_alloc_coherent(dev,
+						   rx_ring->size,
+						   &rx_ring->dma,
+						   GFP_KERNEL);
 
 	if (!rx_ring->desc)
 		goto err;
@@ -3169,7 +3229,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
 {
 	struct igb_tx_buffer *buffer_info;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	if (!tx_ring->tx_buffer_info)
 		return;
@@ -3703,7 +3763,7 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 
 		/* Force detection of hung controller every watchdog period */
-		tx_ring->detect_tx_hung = true;
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
 	}
 
 	/* Cause software interrupt to ensure rx ring is cleaned */
@@ -3754,33 +3814,24 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct igb_ring *ring;
 	unsigned int packets;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
 	 */
 	if (adapter->link_speed != SPEED_1000) {
-		new_val = 976;
+		new_val = IGB_4K_ITR;
 		goto set_itr_val;
 	}
 
-	ring = q_vector->rx_ring;
-	if (ring) {
-		packets = ACCESS_ONCE(ring->total_packets);
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = q_vector->rx.total_bytes / packets;
 
-		if (packets)
-			avg_wire_size = ring->total_bytes / packets;
-	}
-
-	ring = q_vector->tx_ring;
-	if (ring) {
-		packets = ACCESS_ONCE(ring->total_packets);
-
-		if (packets)
-			avg_wire_size = max_t(u32, avg_wire_size,
-					      ring->total_bytes / packets);
-	}
+	packets = q_vector->tx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->tx.total_bytes / packets);
 
 	/* if avg_wire_size isn't set no work was done */
 	if (!avg_wire_size)
@@ -3798,9 +3849,11 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	else
 		new_val = avg_wire_size / 2;
 
-	/* when in itr mode 3 do not exceed 20K ints/sec */
-	if (adapter->rx_itr_setting == 3 && new_val < 196)
-		new_val = 196;
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (new_val < IGB_20K_ITR &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+		new_val = IGB_20K_ITR;
 
 set_itr_val:
 	if (new_val != q_vector->itr_val) {
@@ -3808,14 +3861,10 @@ set_itr_val:
 		q_vector->set_itr = 1;
 	}
 clear_counts:
-	if (q_vector->rx_ring) {
-		q_vector->rx_ring->total_bytes = 0;
-		q_vector->rx_ring->total_packets = 0;
-	}
-	if (q_vector->tx_ring) {
-		q_vector->tx_ring->total_bytes = 0;
-		q_vector->tx_ring->total_packets = 0;
-	}
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+	q_vector->tx.total_bytes = 0;
+	q_vector->tx.total_packets = 0;
 }
 
 /**
@@ -3831,106 +3880,102 @@ clear_counts:
  * parameter (see igb_param.c)
  * NOTE: These calculations are only valid when operating in a single-
  * queue environment.
- * @adapter: pointer to adapter
- * @itr_setting: current q_vector->itr_val
- * @packets: the number of packets during this measurement interval
- * @bytes: the number of bytes during this measurement interval
+ * @q_vector: pointer to q_vector
+ * @ring_container: ring info to update the itr for
  **/
-static unsigned int igb_update_itr(struct igb_adapter *adapter, u16 itr_setting,
-				   int packets, int bytes)
+static void igb_update_itr(struct igb_q_vector *q_vector,
+			   struct igb_ring_container *ring_container)
 {
-	unsigned int retval = itr_setting;
+	unsigned int packets = ring_container->total_packets;
+	unsigned int bytes = ring_container->total_bytes;
+	u8 itrval = ring_container->itr;
 
+	/* no packets, exit with status unchanged */
 	if (packets == 0)
-		goto update_itr_done;
+		return;
 
-	switch (itr_setting) {
+	switch (itrval) {
 	case lowest_latency:
 		/* handle TSO and jumbo frames */
 		if (bytes/packets > 8000)
-			retval = bulk_latency;
+			itrval = bulk_latency;
 		else if ((packets < 5) && (bytes > 512))
-			retval = low_latency;
+			itrval = low_latency;
 		break;
 	case low_latency:  /* 50 usec aka 20000 ints/s */
 		if (bytes > 10000) {
 			/* this if handles the TSO accounting */
 			if (bytes/packets > 8000) {
-				retval = bulk_latency;
+				itrval = bulk_latency;
 			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
-				retval = bulk_latency;
+				itrval = bulk_latency;
 			} else if ((packets > 35)) {
-				retval = lowest_latency;
+				itrval = lowest_latency;
 			}
 		} else if (bytes/packets > 2000) {
-			retval = bulk_latency;
+			itrval = bulk_latency;
 		} else if (packets <= 2 && bytes < 512) {
-			retval = lowest_latency;
+			itrval = lowest_latency;
 		}
 		break;
 	case bulk_latency: /* 250 usec aka 4000 ints/s */
 		if (bytes > 25000) {
 			if (packets > 35)
-				retval = low_latency;
+				itrval = low_latency;
 		} else if (bytes < 1500) {
-			retval = low_latency;
+			itrval = low_latency;
 		}
 		break;
 	}
 
-update_itr_done:
-	return retval;
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itrval;
 }
 
-static void igb_set_itr(struct igb_adapter *adapter)
+static void igb_set_itr(struct igb_q_vector *q_vector)
 {
-	struct igb_q_vector *q_vector = adapter->q_vector[0];
-	u16 current_itr;
+	struct igb_adapter *adapter = q_vector->adapter;
 	u32 new_itr = q_vector->itr_val;
+	u8 current_itr = 0;
 
 	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
 	if (adapter->link_speed != SPEED_1000) {
 		current_itr = 0;
-		new_itr = 4000;
+		new_itr = IGB_4K_ITR;
 		goto set_itr_now;
 	}
 
-	adapter->rx_itr = igb_update_itr(adapter,
-					 adapter->rx_itr,
-					 q_vector->rx_ring->total_packets,
-					 q_vector->rx_ring->total_bytes);
+	igb_update_itr(q_vector, &q_vector->tx);
+	igb_update_itr(q_vector, &q_vector->rx);
 
-	adapter->tx_itr = igb_update_itr(adapter,
-					 adapter->tx_itr,
-					 q_vector->tx_ring->total_packets,
-					 q_vector->tx_ring->total_bytes);
-	current_itr = max(adapter->rx_itr, adapter->tx_itr);
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
 
 	/* conservative mode (itr 3) eliminates the lowest_latency setting */
-	if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
+	if (current_itr == lowest_latency &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
 		current_itr = low_latency;
 
 	switch (current_itr) {
 	/* counts and packets in update_itr are dependent on these numbers */
 	case lowest_latency:
-		new_itr = 56;  /* aka 70,000 ints/sec */
+		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
 		break;
 	case low_latency:
-		new_itr = 196; /* aka 20,000 ints/sec */
+		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
 		break;
 	case bulk_latency:
-		new_itr = 980; /* aka 4,000 ints/sec */
+		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
 		break;
 	default:
 		break;
 	}
 
 set_itr_now:
-	q_vector->rx_ring->total_bytes = 0;
-	q_vector->rx_ring->total_packets = 0;
-	q_vector->tx_ring->total_bytes = 0;
-	q_vector->tx_ring->total_packets = 0;
-
 	if (new_itr != q_vector->itr_val) {
 		/* this attempts to bias the interrupt rate towards Bulk
 		 * by adding intermediate steps when interrupt rate is
@@ -3938,7 +3983,7 @@ set_itr_now:
 		new_itr = new_itr > q_vector->itr_val ?
 		          max((new_itr * q_vector->itr_val) /
 		          (new_itr + (q_vector->itr_val >> 2)),
-		          new_itr) :
+			  new_itr) :
 		          new_itr;
 		/* Don't write the value here; it resets the adapter's
 		 * internal timer, and causes us to delay far longer than
@@ -3966,7 +4011,7 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
 
 	/* For 82575, context index must be unique per ring. */
-	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
@@ -3975,10 +4020,11 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
 	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
 }
 
-static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
-			  u32 tx_flags, __be16 protocol, u8 *hdr_len)
+static int igb_tso(struct igb_ring *tx_ring,
+		   struct igb_tx_buffer *first,
+		   u8 *hdr_len)
 {
-	int err;
+	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens, type_tucmd;
 	u32 mss_l4len_idx, l4len;
 
@@ -3986,7 +4032,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
 		return 0;
 
 	if (skb_header_cloned(skb)) {
-		err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+		int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
 		if (err)
 			return err;
 	}
@@ -3994,7 +4040,7 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
 	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
 
-	if (protocol == __constant_htons(ETH_P_IP)) {
+	if (first->protocol == __constant_htons(ETH_P_IP)) {
 		struct iphdr *iph = ip_hdr(skb);
 		iph->tot_len = 0;
 		iph->check = 0;
@@ -4003,16 +4049,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
 						 IPPROTO_TCP,
 						 0);
 		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+		first->tx_flags |= IGB_TX_FLAGS_TSO |
+				   IGB_TX_FLAGS_CSUM |
+				   IGB_TX_FLAGS_IPV4;
 	} else if (skb_is_gso_v6(skb)) {
 		ipv6_hdr(skb)->payload_len = 0;
 		tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
 						       &ipv6_hdr(skb)->daddr,
 						       0, IPPROTO_TCP, 0);
+		first->tx_flags |= IGB_TX_FLAGS_TSO |
+				   IGB_TX_FLAGS_CSUM;
 	}
 
+	/* compute header lengths */
 	l4len = tcp_hdrlen(skb);
 	*hdr_len = skb_transport_offset(skb) + l4len;
 
+	/* update gso size and bytecount with header size */
+	first->gso_segs = skb_shinfo(skb)->gso_segs;
+	first->bytecount += (first->gso_segs - 1) * *hdr_len;
+
 	/* MSS L4LEN IDX */
 	mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
 	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
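
The two new bytecount lines above replace the gso_segs/bytecount math that is deleted from igb_tx_map() further down: first->bytecount starts as skb->len (set in igb_xmit_frame_ring()), and for TSO the headers are transmitted once per segment, so the on-wire total becomes skb->len + (gso_segs - 1) * hdr_len. For example (numbers mine, purely illustrative): a 3-segment TSO skb with 66 bytes of headers accounts for 2 * 66 = 132 extra bytes on top of skb->len.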
@@ -4020,26 +4076,26 @@ static inline int igb_tso(struct igb_ring *tx_ring, struct sk_buff *skb,
 	/* VLAN MACLEN IPLEN */
 	vlan_macip_lens = skb_network_header_len(skb);
 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
 
 	return 1;
 }
 
-static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
-			       u32 tx_flags, __be16 protocol)
+static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
 {
+	struct sk_buff *skb = first->skb;
 	u32 vlan_macip_lens = 0;
 	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
 
 	if (skb->ip_summed != CHECKSUM_PARTIAL) {
-		if (!(tx_flags & IGB_TX_FLAGS_VLAN))
-			return false;
+		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
+			return;
 	} else {
 		u8 l4_hdr = 0;
-		switch (protocol) {
+		switch (first->protocol) {
 		case __constant_htons(ETH_P_IP):
 			vlan_macip_lens |= skb_network_header_len(skb);
 			type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
@@ -4053,7 +4109,7 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
 			if (unlikely(net_ratelimit())) {
 				dev_warn(tx_ring->dev,
 				 "partial checksum but proto=%x!\n",
-				 protocol);
+				 first->protocol);
 			}
 			break;
 		}
@@ -4081,14 +4137,15 @@ static inline bool igb_tx_csum(struct igb_ring *tx_ring, struct sk_buff *skb,
 			}
 			break;
 		}
+
+		/* update TX checksum flag */
+		first->tx_flags |= IGB_TX_FLAGS_CSUM;
 	}
 
 	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IGB_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
 
 	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
-
-	return (skb->ip_summed == CHECKSUM_PARTIAL);
 }
 
 static __le32 igb_tx_cmd_type(u32 tx_flags)
@@ -4113,14 +4170,15 @@ static __le32 igb_tx_cmd_type(u32 tx_flags)
 	return cmd_type;
 }
 
-static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
-				   struct igb_ring *tx_ring)
+static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
+				 union e1000_adv_tx_desc *tx_desc,
+				 u32 tx_flags, unsigned int paylen)
 {
 	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
 
 	/* 82575 requires a unique index per ring if any offload is enabled */
 	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
-	    (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX))
+	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		olinfo_status |= tx_ring->reg_idx << 4;
 
 	/* insert L4 checksum */
@@ -4132,7 +4190,7 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
 		olinfo_status |= E1000_TXD_POPTS_IXSM << 8;
 	}
 
-	return cpu_to_le32(olinfo_status);
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 }
 
 /*
@@ -4140,12 +4198,13 @@ static __le32 igb_tx_olinfo_status(u32 tx_flags, unsigned int paylen,
  * maintain a power of two alignment we have to limit ourselves to 32K.
  */
 #define IGB_MAX_TXD_PWR	15
-#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD	(1<<IGB_MAX_TXD_PWR)
 
-static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
-		       struct igb_tx_buffer *first, u32 tx_flags,
+static void igb_tx_map(struct igb_ring *tx_ring,
+		       struct igb_tx_buffer *first,
 		       const u8 hdr_len)
 {
+	struct sk_buff *skb = first->skb;
 	struct igb_tx_buffer *tx_buffer_info;
 	union e1000_adv_tx_desc *tx_desc;
 	dma_addr_t dma;
@@ -4154,24 +4213,12 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	unsigned int size = skb_headlen(skb);
 	unsigned int paylen = skb->len - hdr_len;
 	__le32 cmd_type;
+	u32 tx_flags = first->tx_flags;
 	u16 i = tx_ring->next_to_use;
-	u16 gso_segs;
-
-	if (tx_flags & IGB_TX_FLAGS_TSO)
-		gso_segs = skb_shinfo(skb)->gso_segs;
-	else
-		gso_segs = 1;
-
-	/* multiply data chunks by size of headers */
-	first->bytecount = paylen + (gso_segs * hdr_len);
-	first->gso_segs = gso_segs;
-	first->skb = skb;
 
 	tx_desc = IGB_TX_DESC(tx_ring, i);
 
-	tx_desc->read.olinfo_status =
-		igb_tx_olinfo_status(tx_flags, paylen, tx_ring);
-
+	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, paylen);
 	cmd_type = igb_tx_cmd_type(tx_flags);
 
 	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);
@@ -4181,7 +4228,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, struct sk_buff *skb,
 	/* record length, and DMA address */
 	first->length = size;
 	first->dma = dma;
-	first->tx_flags = tx_flags;
 	tx_desc->read.buffer_addr = cpu_to_le64(dma);
 
 	for (;;) {
@@ -4284,7 +4330,7 @@ dma_error:
 	tx_ring->next_to_use = i;
 }
 
-static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
 {
 	struct net_device *netdev = tx_ring->netdev;
 
@@ -4310,7 +4356,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 	return 0;
 }
 
-static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
@@ -4336,6 +4382,12 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
 		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
@@ -4346,22 +4398,17 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
 
-	/* record the location of the first descriptor for this packet */
-	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = protocol;
 
-	tso = igb_tso(tx_ring, skb, tx_flags, protocol, &hdr_len);
-	if (tso < 0) {
+	tso = igb_tso(tx_ring, first, &hdr_len);
+	if (tso < 0)
 		goto out_drop;
-	} else if (tso) {
-		tx_flags |= IGB_TX_FLAGS_TSO | IGB_TX_FLAGS_CSUM;
-		if (protocol == htons(ETH_P_IP))
-			tx_flags |= IGB_TX_FLAGS_IPV4;
-	} else if (igb_tx_csum(tx_ring, skb, tx_flags, protocol) &&
-		   (skb->ip_summed == CHECKSUM_PARTIAL)) {
-		tx_flags |= IGB_TX_FLAGS_CSUM;
-	}
+	else if (!tso)
+		igb_tx_csum(tx_ring, first);
 
-	igb_tx_map(tx_ring, skb, first, tx_flags, hdr_len);
+	igb_tx_map(tx_ring, first, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
 	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
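
With this change the transmit path settles into a fixed order: the first tx_buffer is initialized as soon as the descriptor-space check passes, flags and protocol are stashed on it, and only then do the offload helpers run. igb_tso() returns a negative value on error, 0 for the non-TSO case (which falls through to igb_tx_csum()), and 1 once it has queued a TSO context descriptor. A rough outline of the resulting flow, condensed from the hunks above rather than copied verbatim:

	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;		/* igb_tso() adds the replicated headers */
	first->gso_segs = 1;
	/* ... timestamp / VLAN handling sets bits in tx_flags ... */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;			/* out_drop now unmaps/frees 'first' */
	else if (!tso)
		igb_tx_csum(tx_ring, first);	/* records IGB_TX_FLAGS_CSUM on 'first' itself */

	igb_tx_map(tx_ring, first, hdr_len);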
@@ -4369,7 +4416,8 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 
 out_drop:
-	dev_kfree_skb_any(skb);
+	igb_unmap_and_free_tx_resource(tx_ring, first);
+
 	return NETDEV_TX_OK;
 }
 
@@ -4755,7 +4803,7 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
 	if (adapter->hw.mac.type == e1000_82575)
 		itr_val |= itr_val << 16;
 	else
-		itr_val |= 0x8000000;
+		itr_val |= E1000_EITR_CNT_IGNR;
 
 	writel(itr_val, q_vector->itr_register);
 	q_vector->set_itr = 0;
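
Worth noting: this is not a pure rename. The old literal 0x8000000 sets bit 27, while E1000_EITR_CNT_IGNR, added as 0x80000000 in e1000_defines.h at the top of this diff, sets bit 31 — so switching to the named constant also appears to fix a missing zero in the original mask:

	/* before:  itr_val |= 0x8000000;             (bit 27) */
	/* after:   itr_val |= E1000_EITR_CNT_IGNR;   (0x80000000, bit 31) */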
@@ -4783,8 +4831,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
 	if (q_vector->cpu == cpu)
 		goto out_no_update;
 
-	if (q_vector->tx_ring) {
-		int q = q_vector->tx_ring->reg_idx;
+	if (q_vector->tx.ring) {
+		int q = q_vector->tx.ring->reg_idx;
 		u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
 		if (hw->mac.type == e1000_82575) {
 			dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
@@ -4797,8 +4845,8 @@ static void igb_update_dca(struct igb_q_vector *q_vector)
 		dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
 		wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
 	}
-	if (q_vector->rx_ring) {
-		int q = q_vector->rx_ring->reg_idx;
+	if (q_vector->rx.ring) {
+		int q = q_vector->rx.ring->reg_idx;
 		u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
 		if (hw->mac.type == e1000_82575) {
 			dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
@@ -5079,7 +5127,6 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
 			}
 
 			adapter->vf_data[vf].vlans_enabled++;
-			return 0;
 		}
 	} else {
 		if (i < E1000_VLVF_ARRAY_SIZE) {
@@ -5442,16 +5489,14 @@ static irqreturn_t igb_intr(int irq, void *data)
 	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
 	 * need for the IMC write */
 	u32 icr = rd32(E1000_ICR);
-	if (!icr)
-		return IRQ_NONE;  /* Not our interrupt */
-
-	igb_write_itr(q_vector);
 
 	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
 	 * not set, then the adapter didn't send an interrupt */
 	if (!(icr & E1000_ICR_INT_ASSERTED))
 		return IRQ_NONE;
 
+	igb_write_itr(q_vector);
+
 	if (icr & E1000_ICR_DRSTA)
 		schedule_work(&adapter->reset_task);
 
@@ -5472,15 +5517,15 @@ static irqreturn_t igb_intr(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 
-	if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
-	    (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
-		if (!adapter->msix_entries)
-			igb_set_itr(adapter);
+	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
+			igb_set_itr(q_vector);
 		else
 			igb_update_ring_itr(q_vector);
 	}
@@ -5509,10 +5554,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
 	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
 		igb_update_dca(q_vector);
 #endif
-	if (q_vector->tx_ring)
+	if (q_vector->tx.ring)
 		clean_complete = igb_clean_tx_irq(q_vector);
 
-	if (q_vector->rx_ring)
+	if (q_vector->rx.ring)
 		clean_complete &= igb_clean_rx_irq(q_vector, budget);
 
 	/* If all work not completed, return budget and keep polling */
@@ -5592,11 +5637,11 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector,
 static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct igb_ring *tx_ring = q_vector->tx_ring;
+	struct igb_ring *tx_ring = q_vector->tx.ring;
 	struct igb_tx_buffer *tx_buffer;
 	union e1000_adv_tx_desc *tx_desc, *eop_desc;
 	unsigned int total_bytes = 0, total_packets = 0;
-	unsigned int budget = q_vector->tx_work_limit;
+	unsigned int budget = q_vector->tx.work_limit;
 	unsigned int i = tx_ring->next_to_clean;
 
 	if (test_bit(__IGB_DOWN, &adapter->state))
@@ -5682,17 +5727,17 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
 	u64_stats_update_end(&tx_ring->tx_syncp);
-	tx_ring->total_bytes += total_bytes;
-	tx_ring->total_packets += total_packets;
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
 
-	if (tx_ring->detect_tx_hung) {
+	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
 		struct e1000_hw *hw = &adapter->hw;
 
 		eop_desc = tx_buffer->next_to_watch;
 
 		/* Detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
-		tx_ring->detect_tx_hung = false;
+		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
 		if (eop_desc &&
 		    time_after(jiffies, tx_buffer->time_stamp +
 			       (adapter->tx_timeout_factor * HZ)) &&
@@ -5751,25 +5796,30 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 }
 
 static inline void igb_rx_checksum(struct igb_ring *ring,
-				   u32 status_err, struct sk_buff *skb)
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
 
-	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
-	     (status_err & E1000_RXD_STAT_IXSM))
+	/* Ignore Checksum bit is set */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+		return;
+
+	/* Rx checksum disabled via ethtool */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
 		return;
 
 	/* TCP/UDP checksum error bit is set */
-	if (status_err &
-	    (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
+	if (igb_test_staterr(rx_desc,
+			     E1000_RXDEXT_STATERR_TCPE |
+			     E1000_RXDEXT_STATERR_IPE)) {
 		/*
 		 * work around errata with sctp packets where the TCPE aka
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+		if (!((skb->len == 60) &&
+		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
 			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
 			u64_stats_update_end(&ring->rx_syncp);
@@ -5778,19 +5828,34 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
 		return;
 	}
 	/* It must be a TCP or UDP packet with a valid checksum */
-	if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+				      E1000_RXD_STAT_UDPCS))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 
-	dev_dbg(ring->dev, "cksum success: bits %08X\n", status_err);
+	dev_dbg(ring->dev, "cksum success: bits %08X\n",
+		le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
+static inline void igb_rx_hash(struct igb_ring *ring,
+			       union e1000_adv_rx_desc *rx_desc,
+			       struct sk_buff *skb)
+{
+	if (ring->netdev->features & NETIF_F_RXHASH)
+		skb->rxhash = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
 }
 
-static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
-			    struct sk_buff *skb)
+static void igb_rx_hwtstamp(struct igb_q_vector *q_vector,
+			    union e1000_adv_rx_desc *rx_desc,
+			    struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u64 regval;
 
+	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
+				       E1000_RXDADV_STAT_TS))
+		return;
+
 	/*
 	 * If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
@@ -5802,7 +5867,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (staterr & E1000_RXDADV_STAT_TSIP) {
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
 		u32 *stamp = (u32 *)skb->data;
 		regval = le32_to_cpu(*(stamp + 2));
 		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
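The Rx helpers above now take the descriptor itself and test status bits through igb_test_staterr() rather than a staterr word that was byte-swapped once per packet. The helper's definition is not part of these hunks; judging from the call sites, it likely masks the little-endian status field against a constant converted at compile time, roughly as in the stand-alone sketch below (types, names, and bit values are illustrative stand-ins, not the driver's definitions):

#include <stdint.h>
#include <stdio.h>

/* minimal stand-in for the advanced Rx descriptor write-back layout */
struct adv_rx_wb {
	uint32_t status_error; /* stored little-endian by hardware */
};

/* little-endian host assumed in this sketch; the real macro would use
 * the kernel's cpu_to_le32() so the swap happens on the constant */
#define cpu_to_le32(x) (x)

#define test_staterr(wb, bits) \
	((wb)->status_error & cpu_to_le32(bits))

#define RXD_STAT_DD  0x01 /* descriptor done */
#define RXD_STAT_EOP 0x02 /* end of packet */

int main(void)
{
	struct adv_rx_wb wb = { .status_error = 0x03 };

	if (test_staterr(&wb, RXD_STAT_DD) && test_staterr(&wb, RXD_STAT_EOP))
		printf("descriptor done, end of packet\n");
	return 0;
}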
@@ -5832,18 +5897,16 @@ static inline u16 igb_get_hlen(union e1000_adv_rx_desc *rx_desc)
 
 static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 {
-	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct igb_ring *rx_ring = q_vector->rx.ring;
 	union e1000_adv_rx_desc *rx_desc;
 	const int current_node = numa_node_id();
 	unsigned int total_bytes = 0, total_packets = 0;
-	u32 staterr;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
 	u16 i = rx_ring->next_to_clean;
 
 	rx_desc = IGB_RX_DESC(rx_ring, i);
-	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 
-	while (staterr & E1000_RXD_STAT_DD) {
+	while (igb_test_staterr(rx_desc, E1000_RXD_STAT_DD)) {
 		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
 		struct sk_buff *skb = buffer_info->skb;
 		union e1000_adv_rx_desc *next_rxd;
@@ -5896,7 +5959,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 			buffer_info->page_dma = 0;
 		}
 
-		if (!(staterr & E1000_RXD_STAT_EOP)) {
+		if (!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)) {
 			struct igb_rx_buffer *next_buffer;
 			next_buffer = &rx_ring->rx_buffer_info[i];
 			buffer_info->skb = next_buffer->skb;
@@ -5906,25 +5969,27 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
 			goto next_desc;
 		}
 
-		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+		if (igb_test_staterr(rx_desc,
+				     E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
 			dev_kfree_skb_any(skb);
 			goto next_desc;
 		}
 
-		if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
-			igb_rx_hwtstamp(q_vector, staterr, skb);
-		total_bytes += skb->len;
-		total_packets++;
-
-		igb_rx_checksum(rx_ring, staterr, skb);
-
-		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+		igb_rx_hwtstamp(q_vector, rx_desc, skb);
+		igb_rx_hash(rx_ring, rx_desc, skb);
+		igb_rx_checksum(rx_ring, rx_desc, skb);
 
-		if (staterr & E1000_RXD_STAT_VP) {
+		if (igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
 			u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
 
 			__vlan_hwaccel_put_tag(skb, vid);
 		}
+
+		total_bytes += skb->len;
+		total_packets++;
+
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
 		napi_gro_receive(&q_vector->napi, skb);
 
 		budget--;
@@ -5941,7 +6006,6 @@ next_desc:
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
 	rx_ring->next_to_clean = i;
@@ -5949,8 +6013,8 @@ next_desc:
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
 	u64_stats_update_end(&rx_ring->rx_syncp);
-	rx_ring->total_packets += total_packets;
-	rx_ring->total_bytes += total_bytes;
+	q_vector->rx.total_packets += total_packets;
+	q_vector->rx.total_bytes += total_bytes;
 
 	if (cleaned_count)
 		igb_alloc_rx_buffers(rx_ring, cleaned_count);
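With the two stats hunks above (Tx earlier, Rx here), the per-interrupt byte and packet totals accumulate in the q_vector's rx/tx containers rather than on the rings, which is where the adaptive-ITR code can read them after each poll. As a rough illustration of why those two numbers are enough to pick an interrupt rate, here is a simplified, stand-alone classifier; the thresholds and bucket names are invented for the sketch and are not the driver's igb_update_ring_itr() logic:

#include <stdio.h>

/* illustrative latency classes, smallest interrupt interval first */
enum itr_class { LOWEST_LATENCY, LOW_LATENCY, BULK_LATENCY };

/* Classify one polling interval from per-vector totals: a few small
 * packets suggest latency-sensitive traffic, large average frames or
 * high byte counts suggest throughput-bound traffic. */
static enum itr_class classify(unsigned int packets, unsigned int bytes)
{
	unsigned int avg = packets ? bytes / packets : 0;

	if (packets && packets < 5 && avg < 256)
		return LOWEST_LATENCY;
	if (avg > 1200 || bytes > 64 * 1024)
		return BULK_LATENCY;
	return LOW_LATENCY;
}

int main(void)
{
	printf("%d\n", classify(4, 512));    /* request/response -> 0 */
	printf("%d\n", classify(64, 96000)); /* bulk receive     -> 2 */
	return 0;
}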
@@ -6336,10 +6400,9 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	u32 ctrl, rctl;
+	bool enable = !!(features & NETIF_F_HW_VLAN_RX);
 
-	igb_irq_disable(adapter);
-
-	if (features & NETIF_F_HW_VLAN_RX) {
+	if (enable) {
 		/* enable VLAN tag insert/strip */
 		ctrl = rd32(E1000_CTRL);
 		ctrl |= E1000_CTRL_VME;
@@ -6357,9 +6420,6 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
 	}
 
 	igb_rlpml_set(adapter);
-
-	if (!test_bit(__IGB_DOWN, &adapter->state))
-		igb_irq_enable(adapter);
 }
 
 static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
@@ -6384,11 +6444,6 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	int pf_id = adapter->vfs_allocated_count;
 	s32 err;
 
-	igb_irq_disable(adapter);
-
-	if (!test_bit(__IGB_DOWN, &adapter->state))
-		igb_irq_enable(adapter);
-
 	/* remove vlan from VLVF table array */
 	err = igb_vlvf_set(adapter, vid, false, pf_id);
 
@@ -6403,6 +6458,8 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 {
 	u16 vid;
 
+	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
 	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
 		igb_vlan_rx_add_vid(adapter->netdev, vid);
 }