about summary refs log tree commit diff stats
path: root/drivers/net/ethernet/intel/igb/igb_main.c
diff options
context:
space:
mode:
authorJeff Kirsher <jeffrey.t.kirsher@intel.com>2013-02-23 02:29:56 -0500
committerJeff Kirsher <jeffrey.t.kirsher@intel.com>2013-04-18 19:40:25 -0400
commitb980ac18c95f3251038da7a3826370aff05a7434 (patch)
tree938b3acb07b97963db1006e61933a02b77ac3dca /drivers/net/ethernet/intel/igb/igb_main.c
parentc8268921d443bd5c0c9b8fd7193d00533638ec03 (diff)
igb: Fix code comments and whitespace
Aligns the multi-line code comments with the desired style for the networking tree. Also cleaned up whitespace issues found during the cleanup of code comments (i.e. remove unnecessary blank lines, use tabs where possible, properly wrap lines and keep strings on a single line) Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com> Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Diffstat (limited to 'drivers/net/ethernet/intel/igb/igb_main.c')
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 1139
1 file changed, 564 insertions, 575 deletions
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index d838ab1ea96f..c54ba4224ac6 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -292,9 +292,7 @@ static const struct igb_reg_info igb_reg_info_tbl[] = {
292 {} 292 {}
293}; 293};
294 294
295/* 295/* igb_regdump - register printout routine */
296 * igb_regdump - register printout routine
297 */
298static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo) 296static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
299{ 297{
300 int n = 0; 298 int n = 0;
@@ -360,9 +358,7 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
360 regs[2], regs[3]); 358 regs[2], regs[3]);
361} 359}
362 360
363/* 361/* igb_dump - Print registers, Tx-rings and Rx-rings */
364 * igb_dump - Print registers, tx-rings and rx-rings
365 */
366static void igb_dump(struct igb_adapter *adapter) 362static void igb_dump(struct igb_adapter *adapter)
367{ 363{
368 struct net_device *netdev = adapter->netdev; 364 struct net_device *netdev = adapter->netdev;
@@ -569,12 +565,13 @@ exit:
569 return; 565 return;
570} 566}
571 567
572/* igb_get_i2c_data - Reads the I2C SDA data bit 568/**
569 * igb_get_i2c_data - Reads the I2C SDA data bit
573 * @hw: pointer to hardware structure 570 * @hw: pointer to hardware structure
574 * @i2cctl: Current value of I2CCTL register 571 * @i2cctl: Current value of I2CCTL register
575 * 572 *
576 * Returns the I2C data bit value 573 * Returns the I2C data bit value
577 */ 574 **/
578static int igb_get_i2c_data(void *data) 575static int igb_get_i2c_data(void *data)
579{ 576{
580 struct igb_adapter *adapter = (struct igb_adapter *)data; 577 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -584,12 +581,13 @@ static int igb_get_i2c_data(void *data)
584 return ((i2cctl & E1000_I2C_DATA_IN) != 0); 581 return ((i2cctl & E1000_I2C_DATA_IN) != 0);
585} 582}
586 583
587/* igb_set_i2c_data - Sets the I2C data bit 584/**
585 * igb_set_i2c_data - Sets the I2C data bit
588 * @data: pointer to hardware structure 586 * @data: pointer to hardware structure
589 * @state: I2C data value (0 or 1) to set 587 * @state: I2C data value (0 or 1) to set
590 * 588 *
591 * Sets the I2C data bit 589 * Sets the I2C data bit
592 */ 590 **/
593static void igb_set_i2c_data(void *data, int state) 591static void igb_set_i2c_data(void *data, int state)
594{ 592{
595 struct igb_adapter *adapter = (struct igb_adapter *)data; 593 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -608,12 +606,13 @@ static void igb_set_i2c_data(void *data, int state)
608 606
609} 607}
610 608
611/* igb_set_i2c_clk - Sets the I2C SCL clock 609/**
610 * igb_set_i2c_clk - Sets the I2C SCL clock
612 * @data: pointer to hardware structure 611 * @data: pointer to hardware structure
613 * @state: state to set clock 612 * @state: state to set clock
614 * 613 *
615 * Sets the I2C clock line to state 614 * Sets the I2C clock line to state
616 */ 615 **/
617static void igb_set_i2c_clk(void *data, int state) 616static void igb_set_i2c_clk(void *data, int state)
618{ 617{
619 struct igb_adapter *adapter = (struct igb_adapter *)data; 618 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -631,11 +630,12 @@ static void igb_set_i2c_clk(void *data, int state)
631 wrfl(); 630 wrfl();
632} 631}
633 632
634/* igb_get_i2c_clk - Gets the I2C SCL clock state 633/**
634 * igb_get_i2c_clk - Gets the I2C SCL clock state
635 * @data: pointer to hardware structure 635 * @data: pointer to hardware structure
636 * 636 *
637 * Gets the I2C clock state 637 * Gets the I2C clock state
638 */ 638 **/
639static int igb_get_i2c_clk(void *data) 639static int igb_get_i2c_clk(void *data)
640{ 640{
641 struct igb_adapter *adapter = (struct igb_adapter *)data; 641 struct igb_adapter *adapter = (struct igb_adapter *)data;
@@ -655,8 +655,10 @@ static const struct i2c_algo_bit_data igb_i2c_algo = {
655}; 655};
656 656
657/** 657/**
658 * igb_get_hw_dev - return device 658 * igb_get_hw_dev - return device
659 * used by hardware layer to print debugging information 659 * @hw: pointer to hardware structure
660 *
661 * used by hardware layer to print debugging information
660 **/ 662 **/
661struct net_device *igb_get_hw_dev(struct e1000_hw *hw) 663struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
662{ 664{
@@ -665,10 +667,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
665} 667}
666 668
667/** 669/**
668 * igb_init_module - Driver Registration Routine 670 * igb_init_module - Driver Registration Routine
669 * 671 *
670 * igb_init_module is the first routine called when the driver is 672 * igb_init_module is the first routine called when the driver is
671 * loaded. All it does is register with the PCI subsystem. 673 * loaded. All it does is register with the PCI subsystem.
672 **/ 674 **/
673static int __init igb_init_module(void) 675static int __init igb_init_module(void)
674{ 676{
@@ -688,10 +690,10 @@ static int __init igb_init_module(void)
688module_init(igb_init_module); 690module_init(igb_init_module);
689 691
690/** 692/**
691 * igb_exit_module - Driver Exit Cleanup Routine 693 * igb_exit_module - Driver Exit Cleanup Routine
692 * 694 *
693 * igb_exit_module is called just before the driver is removed 695 * igb_exit_module is called just before the driver is removed
694 * from memory. 696 * from memory.
695 **/ 697 **/
696static void __exit igb_exit_module(void) 698static void __exit igb_exit_module(void)
697{ 699{
@@ -705,11 +707,11 @@ module_exit(igb_exit_module);
705 707
706#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1)) 708#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
707/** 709/**
708 * igb_cache_ring_register - Descriptor ring to register mapping 710 * igb_cache_ring_register - Descriptor ring to register mapping
709 * @adapter: board private structure to initialize 711 * @adapter: board private structure to initialize
710 * 712 *
711 * Once we know the feature-set enabled for the device, we'll cache 713 * Once we know the feature-set enabled for the device, we'll cache
712 * the register offset the descriptor ring is assigned to. 714 * the register offset the descriptor ring is assigned to.
713 **/ 715 **/
714static void igb_cache_ring_register(struct igb_adapter *adapter) 716static void igb_cache_ring_register(struct igb_adapter *adapter)
715{ 717{
@@ -726,7 +728,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
726 if (adapter->vfs_allocated_count) { 728 if (adapter->vfs_allocated_count) {
727 for (; i < adapter->rss_queues; i++) 729 for (; i < adapter->rss_queues; i++)
728 adapter->rx_ring[i]->reg_idx = rbase_offset + 730 adapter->rx_ring[i]->reg_idx = rbase_offset +
729 Q_IDX_82576(i); 731 Q_IDX_82576(i);
730 } 732 }
731 case e1000_82575: 733 case e1000_82575:
732 case e1000_82580: 734 case e1000_82580:
@@ -785,9 +787,10 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
785 switch (hw->mac.type) { 787 switch (hw->mac.type) {
786 case e1000_82575: 788 case e1000_82575:
787 /* The 82575 assigns vectors using a bitmask, which matches the 789 /* The 82575 assigns vectors using a bitmask, which matches the
788 bitmask for the EICR/EIMS/EIMC registers. To assign one 790 * bitmask for the EICR/EIMS/EIMC registers. To assign one
789 or more queues to a vector, we write the appropriate bits 791 * or more queues to a vector, we write the appropriate bits
790 into the MSIXBM register for that vector. */ 792 * into the MSIXBM register for that vector.
793 */
791 if (rx_queue > IGB_N0_QUEUE) 794 if (rx_queue > IGB_N0_QUEUE)
792 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 795 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
793 if (tx_queue > IGB_N0_QUEUE) 796 if (tx_queue > IGB_N0_QUEUE)
@@ -798,8 +801,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
798 q_vector->eims_value = msixbm; 801 q_vector->eims_value = msixbm;
799 break; 802 break;
800 case e1000_82576: 803 case e1000_82576:
801 /* 804 /* 82576 uses a table that essentially consists of 2 columns
802 * 82576 uses a table that essentially consists of 2 columns
803 * with 8 rows. The ordering is column-major so we use the 805 * with 8 rows. The ordering is column-major so we use the
804 * lower 3 bits as the row index, and the 4th bit as the 806 * lower 3 bits as the row index, and the 4th bit as the
805 * column offset. 807 * column offset.
@@ -818,8 +820,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
818 case e1000_i350: 820 case e1000_i350:
819 case e1000_i210: 821 case e1000_i210:
820 case e1000_i211: 822 case e1000_i211:
821 /* 823 /* On 82580 and newer adapters the scheme is similar to 82576
822 * On 82580 and newer adapters the scheme is similar to 82576
823 * however instead of ordering column-major we have things 824 * however instead of ordering column-major we have things
824 * ordered row-major. So we traverse the table by using 825 * ordered row-major. So we traverse the table by using
825 * bit 0 as the column offset, and the remaining bits as the 826 * bit 0 as the column offset, and the remaining bits as the
@@ -848,10 +849,11 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
848} 849}
849 850
850/** 851/**
851 * igb_configure_msix - Configure MSI-X hardware 852 * igb_configure_msix - Configure MSI-X hardware
853 * @adapter: board private structure to initialize
852 * 854 *
853 * igb_configure_msix sets up the hardware to properly 855 * igb_configure_msix sets up the hardware to properly
854 * generate MSI-X interrupts. 856 * generate MSI-X interrupts.
855 **/ 857 **/
856static void igb_configure_msix(struct igb_adapter *adapter) 858static void igb_configure_msix(struct igb_adapter *adapter)
857{ 859{
@@ -875,8 +877,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
875 wr32(E1000_CTRL_EXT, tmp); 877 wr32(E1000_CTRL_EXT, tmp);
876 878
877 /* enable msix_other interrupt */ 879 /* enable msix_other interrupt */
878 array_wr32(E1000_MSIXBM(0), vector++, 880 array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
879 E1000_EIMS_OTHER);
880 adapter->eims_other = E1000_EIMS_OTHER; 881 adapter->eims_other = E1000_EIMS_OTHER;
881 882
882 break; 883 break;
@@ -887,10 +888,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
887 case e1000_i210: 888 case e1000_i210:
888 case e1000_i211: 889 case e1000_i211:
889 /* Turn on MSI-X capability first, or our settings 890 /* Turn on MSI-X capability first, or our settings
890 * won't stick. And it will take days to debug. */ 891 * won't stick. And it will take days to debug.
892 */
891 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 893 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
892 E1000_GPIE_PBA | E1000_GPIE_EIAME | 894 E1000_GPIE_PBA | E1000_GPIE_EIAME |
893 E1000_GPIE_NSICR); 895 E1000_GPIE_NSICR);
894 896
895 /* enable msix_other interrupt */ 897 /* enable msix_other interrupt */
896 adapter->eims_other = 1 << vector; 898 adapter->eims_other = 1 << vector;
@@ -912,10 +914,11 @@ static void igb_configure_msix(struct igb_adapter *adapter)
912} 914}
913 915
914/** 916/**
915 * igb_request_msix - Initialize MSI-X interrupts 917 * igb_request_msix - Initialize MSI-X interrupts
918 * @adapter: board private structure to initialize
916 * 919 *
917 * igb_request_msix allocates MSI-X vectors and requests interrupts from the 920 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
918 * kernel. 921 * kernel.
919 **/ 922 **/
920static int igb_request_msix(struct igb_adapter *adapter) 923static int igb_request_msix(struct igb_adapter *adapter)
921{ 924{
@@ -924,7 +927,7 @@ static int igb_request_msix(struct igb_adapter *adapter)
924 int i, err = 0, vector = 0, free_vector = 0; 927 int i, err = 0, vector = 0, free_vector = 0;
925 928
926 err = request_irq(adapter->msix_entries[vector].vector, 929 err = request_irq(adapter->msix_entries[vector].vector,
927 igb_msix_other, 0, netdev->name, adapter); 930 igb_msix_other, 0, netdev->name, adapter);
928 if (err) 931 if (err)
929 goto err_out; 932 goto err_out;
930 933
@@ -948,8 +951,8 @@ static int igb_request_msix(struct igb_adapter *adapter)
948 sprintf(q_vector->name, "%s-unused", netdev->name); 951 sprintf(q_vector->name, "%s-unused", netdev->name);
949 952
950 err = request_irq(adapter->msix_entries[vector].vector, 953 err = request_irq(adapter->msix_entries[vector].vector,
951 igb_msix_ring, 0, q_vector->name, 954 igb_msix_ring, 0, q_vector->name,
952 q_vector); 955 q_vector);
953 if (err) 956 if (err)
954 goto err_free; 957 goto err_free;
955 } 958 }
@@ -982,13 +985,13 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
982} 985}
983 986
984/** 987/**
985 * igb_free_q_vector - Free memory allocated for specific interrupt vector 988 * igb_free_q_vector - Free memory allocated for specific interrupt vector
986 * @adapter: board private structure to initialize 989 * @adapter: board private structure to initialize
987 * @v_idx: Index of vector to be freed 990 * @v_idx: Index of vector to be freed
988 * 991 *
989 * This function frees the memory allocated to the q_vector. In addition if 992 * This function frees the memory allocated to the q_vector. In addition if
990 * NAPI is enabled it will delete any references to the NAPI struct prior 993 * NAPI is enabled it will delete any references to the NAPI struct prior
991 * to freeing the q_vector. 994 * to freeing the q_vector.
992 **/ 995 **/
993static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx) 996static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
994{ 997{
@@ -1003,20 +1006,19 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1003 adapter->q_vector[v_idx] = NULL; 1006 adapter->q_vector[v_idx] = NULL;
1004 netif_napi_del(&q_vector->napi); 1007 netif_napi_del(&q_vector->napi);
1005 1008
1006 /* 1009 /* ixgbe_get_stats64() might access the rings on this vector,
1007 * ixgbe_get_stats64() might access the rings on this vector,
1008 * we must wait a grace period before freeing it. 1010 * we must wait a grace period before freeing it.
1009 */ 1011 */
1010 kfree_rcu(q_vector, rcu); 1012 kfree_rcu(q_vector, rcu);
1011} 1013}
1012 1014
1013/** 1015/**
1014 * igb_free_q_vectors - Free memory allocated for interrupt vectors 1016 * igb_free_q_vectors - Free memory allocated for interrupt vectors
1015 * @adapter: board private structure to initialize 1017 * @adapter: board private structure to initialize
1016 * 1018 *
1017 * This function frees the memory allocated to the q_vectors. In addition if 1019 * This function frees the memory allocated to the q_vectors. In addition if
1018 * NAPI is enabled it will delete any references to the NAPI struct prior 1020 * NAPI is enabled it will delete any references to the NAPI struct prior
1019 * to freeing the q_vector. 1021 * to freeing the q_vector.
1020 **/ 1022 **/
1021static void igb_free_q_vectors(struct igb_adapter *adapter) 1023static void igb_free_q_vectors(struct igb_adapter *adapter)
1022{ 1024{
@@ -1031,10 +1033,11 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
1031} 1033}
1032 1034
1033/** 1035/**
1034 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts 1036 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
1037 * @adapter: board private structure to initialize
1035 * 1038 *
1036 * This function resets the device so that it has 0 rx queues, tx queues, and 1039 * This function resets the device so that it has 0 Rx queues, Tx queues, and
1037 * MSI-X interrupts allocated. 1040 * MSI-X interrupts allocated.
1038 */ 1041 */
1039static void igb_clear_interrupt_scheme(struct igb_adapter *adapter) 1042static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1040{ 1043{
@@ -1043,10 +1046,12 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
1043} 1046}
1044 1047
1045/** 1048/**
1046 * igb_set_interrupt_capability - set MSI or MSI-X if supported 1049 * igb_set_interrupt_capability - set MSI or MSI-X if supported
1050 * @adapter: board private structure to initialize
1051 * @msix: boolean value of MSIX capability
1047 * 1052 *
1048 * Attempt to configure interrupts using the best available 1053 * Attempt to configure interrupts using the best available
1049 * capabilities of the hardware and kernel. 1054 * capabilities of the hardware and kernel.
1050 **/ 1055 **/
1051static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix) 1056static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1052{ 1057{
@@ -1063,10 +1068,10 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
1063 else 1068 else
1064 adapter->num_tx_queues = adapter->rss_queues; 1069 adapter->num_tx_queues = adapter->rss_queues;
1065 1070
1066 /* start with one vector for every rx queue */ 1071 /* start with one vector for every Rx queue */
1067 numvecs = adapter->num_rx_queues; 1072 numvecs = adapter->num_rx_queues;
1068 1073
1069 /* if tx handler is separate add 1 for every tx queue */ 1074 /* if Tx handler is separate add 1 for every Tx queue */
1070 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) 1075 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
1071 numvecs += adapter->num_tx_queues; 1076 numvecs += adapter->num_tx_queues;
1072 1077
@@ -1128,16 +1133,16 @@ static void igb_add_ring(struct igb_ring *ring,
1128} 1133}
1129 1134
1130/** 1135/**
1131 * igb_alloc_q_vector - Allocate memory for a single interrupt vector 1136 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
1132 * @adapter: board private structure to initialize 1137 * @adapter: board private structure to initialize
1133 * @v_count: q_vectors allocated on adapter, used for ring interleaving 1138 * @v_count: q_vectors allocated on adapter, used for ring interleaving
1134 * @v_idx: index of vector in adapter struct 1139 * @v_idx: index of vector in adapter struct
1135 * @txr_count: total number of Tx rings to allocate 1140 * @txr_count: total number of Tx rings to allocate
1136 * @txr_idx: index of first Tx ring to allocate 1141 * @txr_idx: index of first Tx ring to allocate
1137 * @rxr_count: total number of Rx rings to allocate 1142 * @rxr_count: total number of Rx rings to allocate
1138 * @rxr_idx: index of first Rx ring to allocate 1143 * @rxr_idx: index of first Rx ring to allocate
1139 * 1144 *
1140 * We allocate one q_vector. If allocation fails we return -ENOMEM. 1145 * We allocate one q_vector. If allocation fails we return -ENOMEM.
1141 **/ 1146 **/
1142static int igb_alloc_q_vector(struct igb_adapter *adapter, 1147static int igb_alloc_q_vector(struct igb_adapter *adapter,
1143 int v_count, int v_idx, 1148 int v_count, int v_idx,
@@ -1231,10 +1236,9 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1231 if (adapter->hw.mac.type >= e1000_82576) 1236 if (adapter->hw.mac.type >= e1000_82576)
1232 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags); 1237 set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
1233 1238
1234 /* 1239 /* On i350, i210, and i211, loopback VLAN packets
1235 * On i350, i210, and i211, loopback VLAN packets
1236 * have the tag byte-swapped. 1240 * have the tag byte-swapped.
1237 * */ 1241 */
1238 if (adapter->hw.mac.type >= e1000_i350) 1242 if (adapter->hw.mac.type >= e1000_i350)
1239 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags); 1243 set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
1240 1244
@@ -1251,11 +1255,11 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
1251 1255
1252 1256
1253/** 1257/**
1254 * igb_alloc_q_vectors - Allocate memory for interrupt vectors 1258 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
1255 * @adapter: board private structure to initialize 1259 * @adapter: board private structure to initialize
1256 * 1260 *
1257 * We allocate one q_vector per queue interrupt. If allocation fails we 1261 * We allocate one q_vector per queue interrupt. If allocation fails we
1258 * return -ENOMEM. 1262 * return -ENOMEM.
1259 **/ 1263 **/
1260static int igb_alloc_q_vectors(struct igb_adapter *adapter) 1264static int igb_alloc_q_vectors(struct igb_adapter *adapter)
1261{ 1265{
@@ -1309,9 +1313,11 @@ err_out:
1309} 1313}
1310 1314
1311/** 1315/**
1312 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors 1316 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
1317 * @adapter: board private structure to initialize
1318 * @msix: boolean value of MSIX capability
1313 * 1319 *
1314 * This function initializes the interrupts and allocates all of the queues. 1320 * This function initializes the interrupts and allocates all of the queues.
1315 **/ 1321 **/
1316static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix) 1322static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
1317{ 1323{
@@ -1336,10 +1342,11 @@ err_alloc_q_vectors:
1336} 1342}
1337 1343
1338/** 1344/**
1339 * igb_request_irq - initialize interrupts 1345 * igb_request_irq - initialize interrupts
1346 * @adapter: board private structure to initialize
1340 * 1347 *
1341 * Attempts to configure interrupts using the best available 1348 * Attempts to configure interrupts using the best available
1342 * capabilities of the hardware and kernel. 1349 * capabilities of the hardware and kernel.
1343 **/ 1350 **/
1344static int igb_request_irq(struct igb_adapter *adapter) 1351static int igb_request_irq(struct igb_adapter *adapter)
1345{ 1352{
@@ -1405,15 +1412,14 @@ static void igb_free_irq(struct igb_adapter *adapter)
1405} 1412}
1406 1413
1407/** 1414/**
1408 * igb_irq_disable - Mask off interrupt generation on the NIC 1415 * igb_irq_disable - Mask off interrupt generation on the NIC
1409 * @adapter: board private structure 1416 * @adapter: board private structure
1410 **/ 1417 **/
1411static void igb_irq_disable(struct igb_adapter *adapter) 1418static void igb_irq_disable(struct igb_adapter *adapter)
1412{ 1419{
1413 struct e1000_hw *hw = &adapter->hw; 1420 struct e1000_hw *hw = &adapter->hw;
1414 1421
1415 /* 1422 /* we need to be careful when disabling interrupts. The VFs are also
1416 * we need to be careful when disabling interrupts. The VFs are also
1417 * mapped into these registers and so clearing the bits can cause 1423 * mapped into these registers and so clearing the bits can cause
1418 * issues on the VF drivers so we only need to clear what we set 1424 * issues on the VF drivers so we only need to clear what we set
1419 */ 1425 */
@@ -1438,8 +1444,8 @@ static void igb_irq_disable(struct igb_adapter *adapter)
1438} 1444}
1439 1445
1440/** 1446/**
1441 * igb_irq_enable - Enable default interrupt generation settings 1447 * igb_irq_enable - Enable default interrupt generation settings
1442 * @adapter: board private structure 1448 * @adapter: board private structure
1443 **/ 1449 **/
1444static void igb_irq_enable(struct igb_adapter *adapter) 1450static void igb_irq_enable(struct igb_adapter *adapter)
1445{ 1451{
@@ -1488,13 +1494,12 @@ static void igb_update_mng_vlan(struct igb_adapter *adapter)
1488} 1494}
1489 1495
1490/** 1496/**
1491 * igb_release_hw_control - release control of the h/w to f/w 1497 * igb_release_hw_control - release control of the h/w to f/w
1492 * @adapter: address of board private structure 1498 * @adapter: address of board private structure
1493 *
1494 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1495 * For ASF and Pass Through versions of f/w this means that the
1496 * driver is no longer loaded.
1497 * 1499 *
1500 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
1501 * For ASF and Pass Through versions of f/w this means that the
1502 * driver is no longer loaded.
1498 **/ 1503 **/
1499static void igb_release_hw_control(struct igb_adapter *adapter) 1504static void igb_release_hw_control(struct igb_adapter *adapter)
1500{ 1505{
@@ -1508,13 +1513,12 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
1508} 1513}
1509 1514
1510/** 1515/**
1511 * igb_get_hw_control - get control of the h/w from f/w 1516 * igb_get_hw_control - get control of the h/w from f/w
1512 * @adapter: address of board private structure 1517 * @adapter: address of board private structure
1513 *
1514 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1515 * For ASF and Pass Through versions of f/w this means that
1516 * the driver is loaded.
1517 * 1518 *
1519 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
1520 * For ASF and Pass Through versions of f/w this means that
1521 * the driver is loaded.
1518 **/ 1522 **/
1519static void igb_get_hw_control(struct igb_adapter *adapter) 1523static void igb_get_hw_control(struct igb_adapter *adapter)
1520{ 1524{
@@ -1528,8 +1532,8 @@ static void igb_get_hw_control(struct igb_adapter *adapter)
1528} 1532}
1529 1533
1530/** 1534/**
1531 * igb_configure - configure the hardware for RX and TX 1535 * igb_configure - configure the hardware for RX and TX
1532 * @adapter: private board structure 1536 * @adapter: private board structure
1533 **/ 1537 **/
1534static void igb_configure(struct igb_adapter *adapter) 1538static void igb_configure(struct igb_adapter *adapter)
1535{ 1539{
@@ -1552,7 +1556,8 @@ static void igb_configure(struct igb_adapter *adapter)
1552 1556
1553 /* call igb_desc_unused which always leaves 1557 /* call igb_desc_unused which always leaves
1554 * at least 1 descriptor unused to make sure 1558 * at least 1 descriptor unused to make sure
1555 * next_to_use != next_to_clean */ 1559 * next_to_use != next_to_clean
1560 */
1556 for (i = 0; i < adapter->num_rx_queues; i++) { 1561 for (i = 0; i < adapter->num_rx_queues; i++) {
1557 struct igb_ring *ring = adapter->rx_ring[i]; 1562 struct igb_ring *ring = adapter->rx_ring[i];
1558 igb_alloc_rx_buffers(ring, igb_desc_unused(ring)); 1563 igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
@@ -1560,8 +1565,8 @@ static void igb_configure(struct igb_adapter *adapter)
1560} 1565}
1561 1566
1562/** 1567/**
1563 * igb_power_up_link - Power up the phy/serdes link 1568 * igb_power_up_link - Power up the phy/serdes link
1564 * @adapter: address of board private structure 1569 * @adapter: address of board private structure
1565 **/ 1570 **/
1566void igb_power_up_link(struct igb_adapter *adapter) 1571void igb_power_up_link(struct igb_adapter *adapter)
1567{ 1572{
@@ -1574,8 +1579,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
1574} 1579}
1575 1580
1576/** 1581/**
1577 * igb_power_down_link - Power down the phy/serdes link 1582 * igb_power_down_link - Power down the phy/serdes link
1578 * @adapter: address of board private structure 1583 * @adapter: address of board private structure
1579 */ 1584 */
1580static void igb_power_down_link(struct igb_adapter *adapter) 1585static void igb_power_down_link(struct igb_adapter *adapter)
1581{ 1586{
@@ -1586,8 +1591,8 @@ static void igb_power_down_link(struct igb_adapter *adapter)
1586} 1591}
1587 1592
1588/** 1593/**
1589 * igb_up - Open the interface and prepare it to handle traffic 1594 * igb_up - Open the interface and prepare it to handle traffic
1590 * @adapter: board private structure 1595 * @adapter: board private structure
1591 **/ 1596 **/
1592int igb_up(struct igb_adapter *adapter) 1597int igb_up(struct igb_adapter *adapter)
1593{ 1598{
@@ -1635,7 +1640,8 @@ void igb_down(struct igb_adapter *adapter)
1635 int i; 1640 int i;
1636 1641
1637 /* signal that we're down so the interrupt handler does not 1642 /* signal that we're down so the interrupt handler does not
1638 * reschedule our watchdog timer */ 1643 * reschedule our watchdog timer
1644 */
1639 set_bit(__IGB_DOWN, &adapter->state); 1645 set_bit(__IGB_DOWN, &adapter->state);
1640 1646
1641 /* disable receives in the hardware */ 1647 /* disable receives in the hardware */
@@ -1731,14 +1737,16 @@ void igb_reset(struct igb_adapter *adapter)
1731 * rounded up to the next 1KB and expressed in KB. Likewise, 1737 * rounded up to the next 1KB and expressed in KB. Likewise,
1732 * the Rx FIFO should be large enough to accommodate at least 1738 * the Rx FIFO should be large enough to accommodate at least
1733 * one full receive packet and is similarly rounded up and 1739 * one full receive packet and is similarly rounded up and
1734 * expressed in KB. */ 1740 * expressed in KB.
1741 */
1735 pba = rd32(E1000_PBA); 1742 pba = rd32(E1000_PBA);
1736 /* upper 16 bits has Tx packet buffer allocation size in KB */ 1743 /* upper 16 bits has Tx packet buffer allocation size in KB */
1737 tx_space = pba >> 16; 1744 tx_space = pba >> 16;
1738 /* lower 16 bits has Rx packet buffer allocation size in KB */ 1745 /* lower 16 bits has Rx packet buffer allocation size in KB */
1739 pba &= 0xffff; 1746 pba &= 0xffff;
1740 /* the tx fifo also stores 16 bytes of information about the tx 1747 /* the Tx fifo also stores 16 bytes of information about the Tx
1741 * but don't include ethernet FCS because hardware appends it */ 1748 * but don't include ethernet FCS because hardware appends it
1749 */
1742 min_tx_space = (adapter->max_frame_size + 1750 min_tx_space = (adapter->max_frame_size +
1743 sizeof(union e1000_adv_tx_desc) - 1751 sizeof(union e1000_adv_tx_desc) -
1744 ETH_FCS_LEN) * 2; 1752 ETH_FCS_LEN) * 2;
@@ -1751,13 +1759,15 @@ void igb_reset(struct igb_adapter *adapter)
1751 1759
1752 /* If current Tx allocation is less than the min Tx FIFO size, 1760 /* If current Tx allocation is less than the min Tx FIFO size,
1753 * and the min Tx FIFO size is less than the current Rx FIFO 1761 * and the min Tx FIFO size is less than the current Rx FIFO
1754 * allocation, take space away from current Rx allocation */ 1762 * allocation, take space away from current Rx allocation
1763 */
1755 if (tx_space < min_tx_space && 1764 if (tx_space < min_tx_space &&
1756 ((min_tx_space - tx_space) < pba)) { 1765 ((min_tx_space - tx_space) < pba)) {
1757 pba = pba - (min_tx_space - tx_space); 1766 pba = pba - (min_tx_space - tx_space);
1758 1767
1759 /* if short on rx space, rx wins and must trump tx 1768 /* if short on Rx space, Rx wins and must trump Tx
1760 * adjustment */ 1769 * adjustment
1770 */
1761 if (pba < min_rx_space) 1771 if (pba < min_rx_space)
1762 pba = min_rx_space; 1772 pba = min_rx_space;
1763 } 1773 }
@@ -1769,7 +1779,8 @@ void igb_reset(struct igb_adapter *adapter)
1769 * (or the size used for early receive) above it in the Rx FIFO. 1779 * (or the size used for early receive) above it in the Rx FIFO.
1770 * Set it to the lower of: 1780 * Set it to the lower of:
1771 * - 90% of the Rx FIFO size, or 1781 * - 90% of the Rx FIFO size, or
1772 * - the full Rx FIFO size minus one full frame */ 1782 * - the full Rx FIFO size minus one full frame
1783 */
1773 hwm = min(((pba << 10) * 9 / 10), 1784 hwm = min(((pba << 10) * 9 / 10),
1774 ((pba << 10) - 2 * adapter->max_frame_size)); 1785 ((pba << 10) - 2 * adapter->max_frame_size));
1775 1786
@@ -1800,8 +1811,7 @@ void igb_reset(struct igb_adapter *adapter)
1800 if (hw->mac.ops.init_hw(hw)) 1811 if (hw->mac.ops.init_hw(hw))
1801 dev_err(&pdev->dev, "Hardware Error\n"); 1812 dev_err(&pdev->dev, "Hardware Error\n");
1802 1813
1803 /* 1814 /* Flow control settings reset on hardware reset, so guarantee flow
1804 * Flow control settings reset on hardware reset, so guarantee flow
1805 * control is off when forcing speed. 1815 * control is off when forcing speed.
1806 */ 1816 */
1807 if (!hw->mac.autoneg) 1817 if (!hw->mac.autoneg)
@@ -1837,9 +1847,8 @@ void igb_reset(struct igb_adapter *adapter)
1837static netdev_features_t igb_fix_features(struct net_device *netdev, 1847static netdev_features_t igb_fix_features(struct net_device *netdev,
1838 netdev_features_t features) 1848 netdev_features_t features)
1839{ 1849{
1840 /* 1850 /* Since there is no support for separate Rx/Tx vlan accel
1841 * Since there is no support for separate rx/tx vlan accel 1851 * enable/disable make sure Tx flag is always in same state as Rx.
1842 * enable/disable make sure tx flag is always in same state as rx.
1843 */ 1852 */
1844 if (features & NETIF_F_HW_VLAN_RX) 1853 if (features & NETIF_F_HW_VLAN_RX)
1845 features |= NETIF_F_HW_VLAN_TX; 1854 features |= NETIF_F_HW_VLAN_TX;
@@ -1898,7 +1907,6 @@ static const struct net_device_ops igb_netdev_ops = {
1898/** 1907/**
1899 * igb_set_fw_version - Configure version string for ethtool 1908 * igb_set_fw_version - Configure version string for ethtool
1900 * @adapter: adapter struct 1909 * @adapter: adapter struct
1901 *
1902 **/ 1910 **/
1903void igb_set_fw_version(struct igb_adapter *adapter) 1911void igb_set_fw_version(struct igb_adapter *adapter)
1904{ 1912{
@@ -1934,10 +1942,10 @@ void igb_set_fw_version(struct igb_adapter *adapter)
1934 return; 1942 return;
1935} 1943}
1936 1944
1937/* igb_init_i2c - Init I2C interface 1945/**
1946 * igb_init_i2c - Init I2C interface
1938 * @adapter: pointer to adapter structure 1947 * @adapter: pointer to adapter structure
1939 * 1948 **/
1940 */
1941static s32 igb_init_i2c(struct igb_adapter *adapter) 1949static s32 igb_init_i2c(struct igb_adapter *adapter)
1942{ 1950{
1943 s32 status = E1000_SUCCESS; 1951 s32 status = E1000_SUCCESS;
@@ -1962,15 +1970,15 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
1962} 1970}
1963 1971
1964/** 1972/**
1965 * igb_probe - Device Initialization Routine 1973 * igb_probe - Device Initialization Routine
1966 * @pdev: PCI device information struct 1974 * @pdev: PCI device information struct
1967 * @ent: entry in igb_pci_tbl 1975 * @ent: entry in igb_pci_tbl
1968 * 1976 *
1969 * Returns 0 on success, negative on failure 1977 * Returns 0 on success, negative on failure
1970 * 1978 *
1971 * igb_probe initializes an adapter identified by a pci_dev structure. 1979 * igb_probe initializes an adapter identified by a pci_dev structure.
1972 * The OS initialization, configuring of the adapter private structure, 1980 * The OS initialization, configuring of the adapter private structure,
1973 * and a hardware reset occur. 1981 * and a hardware reset occur.
1974 **/ 1982 **/
1975static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 1983static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1976{ 1984{
@@ -2007,18 +2015,19 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2007 } else { 2015 } else {
2008 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); 2016 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2009 if (err) { 2017 if (err) {
2010 err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 2018 err = dma_set_coherent_mask(&pdev->dev,
2019 DMA_BIT_MASK(32));
2011 if (err) { 2020 if (err) {
2012 dev_err(&pdev->dev, "No usable DMA " 2021 dev_err(&pdev->dev,
2013 "configuration, aborting\n"); 2022 "No usable DMA configuration, aborting\n");
2014 goto err_dma; 2023 goto err_dma;
2015 } 2024 }
2016 } 2025 }
2017 } 2026 }
2018 2027
2019 err = pci_request_selected_regions(pdev, pci_select_bars(pdev, 2028 err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
2020 IORESOURCE_MEM), 2029 IORESOURCE_MEM),
2021 igb_driver_name); 2030 igb_driver_name);
2022 if (err) 2031 if (err)
2023 goto err_pci_reg; 2032 goto err_pci_reg;
2024 2033
@@ -2096,8 +2105,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2096 dev_info(&pdev->dev, 2105 dev_info(&pdev->dev,
2097 "PHY reset is blocked due to SOL/IDER session.\n"); 2106 "PHY reset is blocked due to SOL/IDER session.\n");
2098 2107
2099 /* 2108 /* features is initialized to 0 in allocation, it might have bits
2100 * features is initialized to 0 in allocation, it might have bits
2101 * set by igb_sw_init so we should use an or instead of an 2109 * set by igb_sw_init so we should use an or instead of an
2102 * assignment. 2110 * assignment.
2103 */ 2111 */
@@ -2141,11 +2149,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2141 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw); 2149 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
2142 2150
2143 /* before reading the NVM, reset the controller to put the device in a 2151 /* before reading the NVM, reset the controller to put the device in a
2144 * known good starting state */ 2152 * known good starting state
2153 */
2145 hw->mac.ops.reset_hw(hw); 2154 hw->mac.ops.reset_hw(hw);
2146 2155
2147 /* 2156 /* make sure the NVM is good , i211 parts have special NVM that
2148 * make sure the NVM is good , i211 parts have special NVM that
2149 * doesn't contain a checksum 2157 * doesn't contain a checksum
2150 */ 2158 */
2151 if (hw->mac.type != e1000_i211) { 2159 if (hw->mac.type != e1000_i211) {
@@ -2172,9 +2180,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2172 igb_set_fw_version(adapter); 2180 igb_set_fw_version(adapter);
2173 2181
2174 setup_timer(&adapter->watchdog_timer, igb_watchdog, 2182 setup_timer(&adapter->watchdog_timer, igb_watchdog,
2175 (unsigned long) adapter); 2183 (unsigned long) adapter);
2176 setup_timer(&adapter->phy_info_timer, igb_update_phy_info, 2184 setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
2177 (unsigned long) adapter); 2185 (unsigned long) adapter);
2178 2186
2179 INIT_WORK(&adapter->reset_task, igb_reset_task); 2187 INIT_WORK(&adapter->reset_task, igb_reset_task);
2180 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task); 2188 INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
@@ -2196,8 +2204,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2196 /* Check the NVM for wake support on non-port A ports */ 2204 /* Check the NVM for wake support on non-port A ports */
2197 if (hw->mac.type >= e1000_82580) 2205 if (hw->mac.type >= e1000_82580)
2198 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A + 2206 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
2199 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1, 2207 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
2200 &eeprom_data); 2208 &eeprom_data);
2201 else if (hw->bus.func == 1) 2209 else if (hw->bus.func == 1)
2202 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); 2210 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
2203 2211
@@ -2206,7 +2214,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2206 2214
2207 /* now that we have the eeprom settings, apply the special cases where 2215 /* now that we have the eeprom settings, apply the special cases where
2208 * the eeprom may be wrong or the board simply won't support wake on 2216 * the eeprom may be wrong or the board simply won't support wake on
2209 * lan on a particular port */ 2217 * lan on a particular port
2218 */
2210 switch (pdev->device) { 2219 switch (pdev->device) {
2211 case E1000_DEV_ID_82575GB_QUAD_COPPER: 2220 case E1000_DEV_ID_82575GB_QUAD_COPPER:
2212 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2221 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
@@ -2215,7 +2224,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2215 case E1000_DEV_ID_82576_FIBER: 2224 case E1000_DEV_ID_82576_FIBER:
2216 case E1000_DEV_ID_82576_SERDES: 2225 case E1000_DEV_ID_82576_SERDES:
2217 /* Wake events only supported on port A for dual fiber 2226 /* Wake events only supported on port A for dual fiber
2218 * regardless of eeprom setting */ 2227 * regardless of eeprom setting
2228 */
2219 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1) 2229 if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
2220 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED; 2230 adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
2221 break; 2231 break;
@@ -2285,8 +2295,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2285 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) { 2295 if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
2286 u16 ets_word; 2296 u16 ets_word;
2287 2297
2288 /* 2298 /* Read the NVM to determine if this i350 device supports an
2289 * Read the NVM to determine if this i350 device supports an
2290 * external thermal sensor. 2299 * external thermal sensor.
2291 */ 2300 */
2292 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word); 2301 hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
@@ -2310,7 +2319,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2310 netdev->name, 2319 netdev->name,
2311 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" : 2320 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
2312 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" : 2321 (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
2313 "unknown"), 2322 "unknown"),
2314 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 2323 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
2315 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 2324 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
2316 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 2325 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -2355,7 +2364,7 @@ err_ioremap:
2355 free_netdev(netdev); 2364 free_netdev(netdev);
2356err_alloc_etherdev: 2365err_alloc_etherdev:
2357 pci_release_selected_regions(pdev, 2366 pci_release_selected_regions(pdev,
2358 pci_select_bars(pdev, IORESOURCE_MEM)); 2367 pci_select_bars(pdev, IORESOURCE_MEM));
2359err_pci_reg: 2368err_pci_reg:
2360err_dma: 2369err_dma:
2361 pci_disable_device(pdev); 2370 pci_disable_device(pdev);
@@ -2455,26 +2464,24 @@ out:
2455} 2464}
2456 2465
2457#endif 2466#endif
2458/* 2467/**
2459 * igb_remove_i2c - Cleanup I2C interface 2468 * igb_remove_i2c - Cleanup I2C interface
2460 * @adapter: pointer to adapter structure 2469 * @adapter: pointer to adapter structure
2461 * 2470 **/
2462 */
2463static void igb_remove_i2c(struct igb_adapter *adapter) 2471static void igb_remove_i2c(struct igb_adapter *adapter)
2464{ 2472{
2465
2466 /* free the adapter bus structure */ 2473 /* free the adapter bus structure */
2467 i2c_del_adapter(&adapter->i2c_adap); 2474 i2c_del_adapter(&adapter->i2c_adap);
2468} 2475}
2469 2476
2470/** 2477/**
2471 * igb_remove - Device Removal Routine 2478 * igb_remove - Device Removal Routine
2472 * @pdev: PCI device information struct 2479 * @pdev: PCI device information struct
2473 * 2480 *
2474 * igb_remove is called by the PCI subsystem to alert the driver 2481 * igb_remove is called by the PCI subsystem to alert the driver
2475 * that it should release a PCI device. The could be caused by a 2482 * that it should release a PCI device. The could be caused by a
2476 * Hot-Plug event, or because the driver is going to be removed from 2483 * Hot-Plug event, or because the driver is going to be removed from
2477 * memory. 2484 * memory.
2478 **/ 2485 **/
2479static void igb_remove(struct pci_dev *pdev) 2486static void igb_remove(struct pci_dev *pdev)
2480{ 2487{
@@ -2488,8 +2495,7 @@ static void igb_remove(struct pci_dev *pdev)
2488#endif 2495#endif
2489 igb_remove_i2c(adapter); 2496 igb_remove_i2c(adapter);
2490 igb_ptp_stop(adapter); 2497 igb_ptp_stop(adapter);
2491 /* 2498 /* The watchdog timer may be rescheduled, so explicitly
2492 * The watchdog timer may be rescheduled, so explicitly
2493 * disable watchdog from being rescheduled. 2499 * disable watchdog from being rescheduled.
2494 */ 2500 */
2495 set_bit(__IGB_DOWN, &adapter->state); 2501 set_bit(__IGB_DOWN, &adapter->state);
@@ -2509,7 +2515,8 @@ static void igb_remove(struct pci_dev *pdev)
2509#endif 2515#endif
2510 2516
2511 /* Release control of h/w to f/w. If f/w is AMT enabled, this 2517 /* Release control of h/w to f/w. If f/w is AMT enabled, this
2512 * would have already happened in close and is redundant. */ 2518 * would have already happened in close and is redundant.
2519 */
2513 igb_release_hw_control(adapter); 2520 igb_release_hw_control(adapter);
2514 2521
2515 unregister_netdev(netdev); 2522 unregister_netdev(netdev);
@@ -2524,7 +2531,7 @@ static void igb_remove(struct pci_dev *pdev)
2524 if (hw->flash_address) 2531 if (hw->flash_address)
2525 iounmap(hw->flash_address); 2532 iounmap(hw->flash_address);
2526 pci_release_selected_regions(pdev, 2533 pci_release_selected_regions(pdev,
2527 pci_select_bars(pdev, IORESOURCE_MEM)); 2534 pci_select_bars(pdev, IORESOURCE_MEM));
2528 2535
2529 kfree(adapter->shadow_vfta); 2536 kfree(adapter->shadow_vfta);
2530 free_netdev(netdev); 2537 free_netdev(netdev);
@@ -2535,13 +2542,13 @@ static void igb_remove(struct pci_dev *pdev)
2535} 2542}
2536 2543
2537/** 2544/**
2538 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space 2545 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
2539 * @adapter: board private structure to initialize 2546 * @adapter: board private structure to initialize
2540 * 2547 *
2541 * This function initializes the vf specific data storage and then attempts to 2548 * This function initializes the vf specific data storage and then attempts to
2542 * allocate the VFs. The reason for ordering it this way is because it is much 2549 * allocate the VFs. The reason for ordering it this way is because it is much
2543 * mor expensive time wise to disable SR-IOV than it is to allocate and free 2550 * mor expensive time wise to disable SR-IOV than it is to allocate and free
2544 * the memory for the VFs. 2551 * the memory for the VFs.
2545 **/ 2552 **/
2546static void igb_probe_vfs(struct igb_adapter *adapter) 2553static void igb_probe_vfs(struct igb_adapter *adapter)
2547{ 2554{
@@ -2601,8 +2608,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2601 /* Device supports enough interrupts without queue pairing. */ 2608 /* Device supports enough interrupts without queue pairing. */
2602 break; 2609 break;
2603 case e1000_82576: 2610 case e1000_82576:
2604 /* 2611 /* If VFs are going to be allocated with RSS queues then we
2605 * If VFs are going to be allocated with RSS queues then we
2606 * should pair the queues in order to conserve interrupts due 2612 * should pair the queues in order to conserve interrupts due
2607 * to limited supply. 2613 * to limited supply.
2608 */ 2614 */
@@ -2614,8 +2620,7 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2614 case e1000_i350: 2620 case e1000_i350:
2615 case e1000_i210: 2621 case e1000_i210:
2616 default: 2622 default:
2617 /* 2623 /* If rss_queues > half of max_rss_queues, pair the queues in
2618 * If rss_queues > half of max_rss_queues, pair the queues in
2619 * order to conserve interrupts due to limited supply. 2624 * order to conserve interrupts due to limited supply.
2620 */ 2625 */
2621 if (adapter->rss_queues > (max_rss_queues / 2)) 2626 if (adapter->rss_queues > (max_rss_queues / 2))
@@ -2625,12 +2630,12 @@ static void igb_init_queue_configuration(struct igb_adapter *adapter)
2625} 2630}
2626 2631
2627/** 2632/**
2628 * igb_sw_init - Initialize general software structures (struct igb_adapter) 2633 * igb_sw_init - Initialize general software structures (struct igb_adapter)
2629 * @adapter: board private structure to initialize 2634 * @adapter: board private structure to initialize
2630 * 2635 *
2631 * igb_sw_init initializes the Adapter private data structure. 2636 * igb_sw_init initializes the Adapter private data structure.
2632 * Fields are initialized based on PCI device information and 2637 * Fields are initialized based on PCI device information and
2633 * OS network device settings (MTU size). 2638 * OS network device settings (MTU size).
2634 **/ 2639 **/
2635static int igb_sw_init(struct igb_adapter *adapter) 2640static int igb_sw_init(struct igb_adapter *adapter)
2636{ 2641{
@@ -2700,16 +2705,16 @@ static int igb_sw_init(struct igb_adapter *adapter)
2700} 2705}
2701 2706
2702/** 2707/**
2703 * igb_open - Called when a network interface is made active 2708 * igb_open - Called when a network interface is made active
2704 * @netdev: network interface device structure 2709 * @netdev: network interface device structure
2705 * 2710 *
2706 * Returns 0 on success, negative value on failure 2711 * Returns 0 on success, negative value on failure
2707 * 2712 *
2708 * The open entry point is called when a network interface is made 2713 * The open entry point is called when a network interface is made
2709 * active by the system (IFF_UP). At this point all resources needed 2714 * active by the system (IFF_UP). At this point all resources needed
2710 * for transmit and receive operations are allocated, the interrupt 2715 * for transmit and receive operations are allocated, the interrupt
2711 * handler is registered with the OS, the watchdog timer is started, 2716 * handler is registered with the OS, the watchdog timer is started,
2712 * and the stack is notified that the interface is ready. 2717 * and the stack is notified that the interface is ready.
2713 **/ 2718 **/
2714static int __igb_open(struct net_device *netdev, bool resuming) 2719static int __igb_open(struct net_device *netdev, bool resuming)
2715{ 2720{
@@ -2745,7 +2750,8 @@ static int __igb_open(struct net_device *netdev, bool resuming)
2745 /* before we allocate an interrupt, we must be ready to handle it. 2750 /* before we allocate an interrupt, we must be ready to handle it.
2746 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 2751 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
2747 * as soon as we call pci_request_irq, so we have to setup our 2752 * as soon as we call pci_request_irq, so we have to setup our
2748 * clean_rx handler before we do so. */ 2753 * clean_rx handler before we do so.
2754 */
2749 igb_configure(adapter); 2755 igb_configure(adapter);
2750 2756
2751 err = igb_request_irq(adapter); 2757 err = igb_request_irq(adapter);
@@ -2814,15 +2820,15 @@ static int igb_open(struct net_device *netdev)
2814} 2820}
2815 2821
2816/** 2822/**
2817 * igb_close - Disables a network interface 2823 * igb_close - Disables a network interface
2818 * @netdev: network interface device structure 2824 * @netdev: network interface device structure
2819 * 2825 *
2820 * Returns 0, this is not allowed to fail 2826 * Returns 0, this is not allowed to fail
2821 * 2827 *
2822 * The close entry point is called when an interface is de-activated 2828 * The close entry point is called when an interface is de-activated
2823 * by the OS. The hardware is still under the driver's control, but 2829 * by the OS. The hardware is still under the driver's control, but
2824 * needs to be disabled. A global MAC reset is issued to stop the 2830 * needs to be disabled. A global MAC reset is issued to stop the
2825 * hardware, and all transmit and receive resources are freed. 2831 * hardware, and all transmit and receive resources are freed.
2826 **/ 2832 **/
2827static int __igb_close(struct net_device *netdev, bool suspending) 2833static int __igb_close(struct net_device *netdev, bool suspending)
2828{ 2834{
@@ -2851,10 +2857,10 @@ static int igb_close(struct net_device *netdev)
2851} 2857}
2852 2858
2853/** 2859/**
2854 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 2860 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
2855 * @tx_ring: tx descriptor ring (for a specific queue) to setup 2861 * @tx_ring: tx descriptor ring (for a specific queue) to setup
2856 * 2862 *
2857 * Return 0 on success, negative on failure 2863 * Return 0 on success, negative on failure
2858 **/ 2864 **/
2859int igb_setup_tx_resources(struct igb_ring *tx_ring) 2865int igb_setup_tx_resources(struct igb_ring *tx_ring)
2860{ 2866{
@@ -2889,11 +2895,11 @@ err:
2889} 2895}
2890 2896
2891/** 2897/**
2892 * igb_setup_all_tx_resources - wrapper to allocate Tx resources 2898 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
2893 * (Descriptors) for all queues 2899 * (Descriptors) for all queues
2894 * @adapter: board private structure 2900 * @adapter: board private structure
2895 * 2901 *
2896 * Return 0 on success, negative on failure 2902 * Return 0 on success, negative on failure
2897 **/ 2903 **/
2898static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2904static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2899{ 2905{
@@ -2915,8 +2921,8 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
2915} 2921}
2916 2922
2917/** 2923/**
2918 * igb_setup_tctl - configure the transmit control registers 2924 * igb_setup_tctl - configure the transmit control registers
2919 * @adapter: Board private structure 2925 * @adapter: Board private structure
2920 **/ 2926 **/
2921void igb_setup_tctl(struct igb_adapter *adapter) 2927void igb_setup_tctl(struct igb_adapter *adapter)
2922{ 2928{
@@ -2941,11 +2947,11 @@ void igb_setup_tctl(struct igb_adapter *adapter)
2941} 2947}
2942 2948
2943/** 2949/**
2944 * igb_configure_tx_ring - Configure transmit ring after Reset 2950 * igb_configure_tx_ring - Configure transmit ring after Reset
2945 * @adapter: board private structure 2951 * @adapter: board private structure
2946 * @ring: tx ring to configure 2952 * @ring: tx ring to configure
2947 * 2953 *
2948 * Configure a transmit ring after a reset. 2954 * Configure a transmit ring after a reset.
2949 **/ 2955 **/
2950void igb_configure_tx_ring(struct igb_adapter *adapter, 2956void igb_configure_tx_ring(struct igb_adapter *adapter,
2951 struct igb_ring *ring) 2957 struct igb_ring *ring)
@@ -2961,9 +2967,9 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2961 mdelay(10); 2967 mdelay(10);
2962 2968
2963 wr32(E1000_TDLEN(reg_idx), 2969 wr32(E1000_TDLEN(reg_idx),
2964 ring->count * sizeof(union e1000_adv_tx_desc)); 2970 ring->count * sizeof(union e1000_adv_tx_desc));
2965 wr32(E1000_TDBAL(reg_idx), 2971 wr32(E1000_TDBAL(reg_idx),
2966 tdba & 0x00000000ffffffffULL); 2972 tdba & 0x00000000ffffffffULL);
2967 wr32(E1000_TDBAH(reg_idx), tdba >> 32); 2973 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2968 2974
2969 ring->tail = hw->hw_addr + E1000_TDT(reg_idx); 2975 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
@@ -2979,10 +2985,10 @@ void igb_configure_tx_ring(struct igb_adapter *adapter,
2979} 2985}
2980 2986
2981/** 2987/**
2982 * igb_configure_tx - Configure transmit Unit after Reset 2988 * igb_configure_tx - Configure transmit Unit after Reset
2983 * @adapter: board private structure 2989 * @adapter: board private structure
2984 * 2990 *
2985 * Configure the Tx unit of the MAC after a reset. 2991 * Configure the Tx unit of the MAC after a reset.
2986 **/ 2992 **/
2987static void igb_configure_tx(struct igb_adapter *adapter) 2993static void igb_configure_tx(struct igb_adapter *adapter)
2988{ 2994{
@@ -2993,10 +2999,10 @@ static void igb_configure_tx(struct igb_adapter *adapter)
2993} 2999}
2994 3000
2995/** 3001/**
2996 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 3002 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
2997 * @rx_ring: rx descriptor ring (for a specific queue) to setup 3003 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
2998 * 3004 *
2999 * Returns 0 on success, negative on failure 3005 * Returns 0 on success, negative on failure
3000 **/ 3006 **/
3001int igb_setup_rx_resources(struct igb_ring *rx_ring) 3007int igb_setup_rx_resources(struct igb_ring *rx_ring)
3002{ 3008{
@@ -3032,11 +3038,11 @@ err:
3032} 3038}
3033 3039
3034/** 3040/**
3035 * igb_setup_all_rx_resources - wrapper to allocate Rx resources 3041 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
3036 * (Descriptors) for all queues 3042 * (Descriptors) for all queues
3037 * @adapter: board private structure 3043 * @adapter: board private structure
3038 * 3044 *
3039 * Return 0 on success, negative on failure 3045 * Return 0 on success, negative on failure
3040 **/ 3046 **/
3041static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 3047static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3042{ 3048{
@@ -3058,8 +3064,8 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
3058} 3064}
3059 3065
3060/** 3066/**
3061 * igb_setup_mrqc - configure the multiple receive queue control registers 3067 * igb_setup_mrqc - configure the multiple receive queue control registers
3062 * @adapter: Board private structure 3068 * @adapter: Board private structure
3063 **/ 3069 **/
3064static void igb_setup_mrqc(struct igb_adapter *adapter) 3070static void igb_setup_mrqc(struct igb_adapter *adapter)
3065{ 3071{
@@ -3092,8 +3098,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3092 break; 3098 break;
3093 } 3099 }
3094 3100
3095 /* 3101 /* Populate the indirection table 4 entries at a time. To do this
3096 * Populate the indirection table 4 entries at a time. To do this
3097 * we are generating the results for n and n+2 and then interleaving 3102 * we are generating the results for n and n+2 and then interleaving
3098 * those with the results with n+1 and n+3. 3103 * those with the results with n+1 and n+3.
3099 */ 3104 */
@@ -3109,8 +3114,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3109 wr32(E1000_RETA(j), reta); 3114 wr32(E1000_RETA(j), reta);
3110 } 3115 }
3111 3116
3112 /* 3117 /* Disable raw packet checksumming so that RSS hash is placed in
3113 * Disable raw packet checksumming so that RSS hash is placed in
3114 * descriptor on writeback. No need to enable TCP/UDP/IP checksum 3118 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
3115 * offloads as they are enabled by default 3119 * offloads as they are enabled by default
3116 */ 3120 */
@@ -3140,7 +3144,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3140 3144
3141 /* If VMDq is enabled then we set the appropriate mode for that, else 3145 /* If VMDq is enabled then we set the appropriate mode for that, else
3142 * we default to RSS so that an RSS hash is calculated per packet even 3146 * we default to RSS so that an RSS hash is calculated per packet even
3143 * if we are only using one queue */ 3147 * if we are only using one queue
3148 */
3144 if (adapter->vfs_allocated_count) { 3149 if (adapter->vfs_allocated_count) {
3145 if (hw->mac.type > e1000_82575) { 3150 if (hw->mac.type > e1000_82575) {
3146 /* Set the default pool for the PF's first queue */ 3151 /* Set the default pool for the PF's first queue */
@@ -3165,8 +3170,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
3165} 3170}
3166 3171
3167/** 3172/**
3168 * igb_setup_rctl - configure the receive control registers 3173 * igb_setup_rctl - configure the receive control registers
3169 * @adapter: Board private structure 3174 * @adapter: Board private structure
3170 **/ 3175 **/
3171void igb_setup_rctl(struct igb_adapter *adapter) 3176void igb_setup_rctl(struct igb_adapter *adapter)
3172{ 3177{
@@ -3181,8 +3186,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3181 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 3186 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
3182 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 3187 (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3183 3188
3184 /* 3189 /* enable stripping of CRC. It's unlikely this will break BMC
3185 * enable stripping of CRC. It's unlikely this will break BMC
3186 * redirection as it did with e1000. Newer features require 3190 * redirection as it did with e1000. Newer features require
3187 * that the HW strips the CRC. 3191 * that the HW strips the CRC.
3188 */ 3192 */
@@ -3209,7 +3213,8 @@ void igb_setup_rctl(struct igb_adapter *adapter)
3209 /* This is useful for sniffing bad packets. */ 3213 /* This is useful for sniffing bad packets. */
3210 if (adapter->netdev->features & NETIF_F_RXALL) { 3214 if (adapter->netdev->features & NETIF_F_RXALL) {
3211 /* UPE and MPE will be handled by normal PROMISC logic 3215 /* UPE and MPE will be handled by normal PROMISC logic
3212 * in e1000e_set_rx_mode */ 3216 * in e1000e_set_rx_mode
3217 */
3213 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */ 3218 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3214 E1000_RCTL_BAM | /* RX All Bcast Pkts */ 3219 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3215 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */ 3220 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
@@ -3232,7 +3237,8 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3232 u32 vmolr; 3237 u32 vmolr;
3233 3238
3234 /* if it isn't the PF check to see if VFs are enabled and 3239 /* if it isn't the PF check to see if VFs are enabled and
3235 * increase the size to support vlan tags */ 3240 * increase the size to support vlan tags
3241 */
3236 if (vfn < adapter->vfs_allocated_count && 3242 if (vfn < adapter->vfs_allocated_count &&
3237 adapter->vf_data[vfn].vlans_enabled) 3243 adapter->vf_data[vfn].vlans_enabled)
3238 size += VLAN_TAG_SIZE; 3244 size += VLAN_TAG_SIZE;
@@ -3246,10 +3252,10 @@ static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
3246} 3252}
3247 3253
3248/** 3254/**
3249 * igb_rlpml_set - set maximum receive packet size 3255 * igb_rlpml_set - set maximum receive packet size
3250 * @adapter: board private structure 3256 * @adapter: board private structure
3251 * 3257 *
3252 * Configure maximum receivable packet size. 3258 * Configure maximum receivable packet size.
3253 **/ 3259 **/
3254static void igb_rlpml_set(struct igb_adapter *adapter) 3260static void igb_rlpml_set(struct igb_adapter *adapter)
3255{ 3261{
@@ -3259,8 +3265,7 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
3259 3265
3260 if (pf_id) { 3266 if (pf_id) {
3261 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 3267 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
3262 /* 3268 /* If we're in VMDQ or SR-IOV mode, then set global RLPML
3263 * If we're in VMDQ or SR-IOV mode, then set global RLPML
3264 * to our max jumbo frame size, in case we need to enable 3269 * to our max jumbo frame size, in case we need to enable
3265 * jumbo frames on one of the rings later. 3270 * jumbo frames on one of the rings later.
3266 * This will not pass over-length frames into the default 3271 * This will not pass over-length frames into the default
@@ -3278,17 +3283,16 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3278 struct e1000_hw *hw = &adapter->hw; 3283 struct e1000_hw *hw = &adapter->hw;
3279 u32 vmolr; 3284 u32 vmolr;
3280 3285
3281 /* 3286 /* This register exists only on 82576 and newer so if we are older then
3282 * This register exists only on 82576 and newer so if we are older then
3283 * we should exit and do nothing 3287 * we should exit and do nothing
3284 */ 3288 */
3285 if (hw->mac.type < e1000_82576) 3289 if (hw->mac.type < e1000_82576)
3286 return; 3290 return;
3287 3291
3288 vmolr = rd32(E1000_VMOLR(vfn)); 3292 vmolr = rd32(E1000_VMOLR(vfn));
3289 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */ 3293 vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
3290 if (aupe) 3294 if (aupe)
3291 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */ 3295 vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
3292 else 3296 else
3293 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */ 3297 vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
3294 3298
@@ -3297,25 +3301,24 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter,
3297 3301
3298 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count) 3302 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
3299 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */ 3303 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
3300 /* 3304 /* for VMDq only allow the VFs and pool 0 to accept broadcast and
3301 * for VMDq only allow the VFs and pool 0 to accept broadcast and
3302 * multicast packets 3305 * multicast packets
3303 */ 3306 */
3304 if (vfn <= adapter->vfs_allocated_count) 3307 if (vfn <= adapter->vfs_allocated_count)
3305 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */ 3308 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
3306 3309
3307 wr32(E1000_VMOLR(vfn), vmolr); 3310 wr32(E1000_VMOLR(vfn), vmolr);
3308} 3311}
3309 3312
3310/** 3313/**
3311 * igb_configure_rx_ring - Configure a receive ring after Reset 3314 * igb_configure_rx_ring - Configure a receive ring after Reset
3312 * @adapter: board private structure 3315 * @adapter: board private structure
3313 * @ring: receive ring to be configured 3316 * @ring: receive ring to be configured
3314 * 3317 *
3315 * Configure the Rx unit of the MAC after a reset. 3318 * Configure the Rx unit of the MAC after a reset.
3316 **/ 3319 **/
3317void igb_configure_rx_ring(struct igb_adapter *adapter, 3320void igb_configure_rx_ring(struct igb_adapter *adapter,
3318 struct igb_ring *ring) 3321 struct igb_ring *ring)
3319{ 3322{
3320 struct e1000_hw *hw = &adapter->hw; 3323 struct e1000_hw *hw = &adapter->hw;
3321 u64 rdba = ring->dma; 3324 u64 rdba = ring->dma;
@@ -3330,7 +3333,7 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
3330 rdba & 0x00000000ffffffffULL); 3333 rdba & 0x00000000ffffffffULL);
3331 wr32(E1000_RDBAH(reg_idx), rdba >> 32); 3334 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
3332 wr32(E1000_RDLEN(reg_idx), 3335 wr32(E1000_RDLEN(reg_idx),
3333 ring->count * sizeof(union e1000_adv_rx_desc)); 3336 ring->count * sizeof(union e1000_adv_rx_desc));
3334 3337
3335 /* initialize head and tail */ 3338 /* initialize head and tail */
3336 ring->tail = hw->hw_addr + E1000_RDT(reg_idx); 3339 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
@@ -3376,10 +3379,10 @@ static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
3376} 3379}
3377 3380
3378/** 3381/**
3379 * igb_configure_rx - Configure receive Unit after Reset 3382 * igb_configure_rx - Configure receive Unit after Reset
3380 * @adapter: board private structure 3383 * @adapter: board private structure
3381 * 3384 *
3382 * Configure the Rx unit of the MAC after a reset. 3385 * Configure the Rx unit of the MAC after a reset.
3383 **/ 3386 **/
3384static void igb_configure_rx(struct igb_adapter *adapter) 3387static void igb_configure_rx(struct igb_adapter *adapter)
3385{ 3388{
@@ -3390,10 +3393,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3390 3393
3391 /* set the correct pool for the PF default MAC address in entry 0 */ 3394 /* set the correct pool for the PF default MAC address in entry 0 */
3392 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 3395 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
3393 adapter->vfs_allocated_count); 3396 adapter->vfs_allocated_count);
3394 3397
3395 /* Setup the HW Rx Head and Tail Descriptor Pointers and 3398 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3396 * the Base and Length of the Rx Descriptor Ring */ 3399 * the Base and Length of the Rx Descriptor Ring
3400 */
3397 for (i = 0; i < adapter->num_rx_queues; i++) { 3401 for (i = 0; i < adapter->num_rx_queues; i++) {
3398 struct igb_ring *rx_ring = adapter->rx_ring[i]; 3402 struct igb_ring *rx_ring = adapter->rx_ring[i];
3399 igb_set_rx_buffer_len(adapter, rx_ring); 3403 igb_set_rx_buffer_len(adapter, rx_ring);
@@ -3402,10 +3406,10 @@ static void igb_configure_rx(struct igb_adapter *adapter)
3402} 3406}
3403 3407
3404/** 3408/**
3405 * igb_free_tx_resources - Free Tx Resources per Queue 3409 * igb_free_tx_resources - Free Tx Resources per Queue
3406 * @tx_ring: Tx descriptor ring for a specific queue 3410 * @tx_ring: Tx descriptor ring for a specific queue
3407 * 3411 *
3408 * Free all transmit software resources 3412 * Free all transmit software resources
3409 **/ 3413 **/
3410void igb_free_tx_resources(struct igb_ring *tx_ring) 3414void igb_free_tx_resources(struct igb_ring *tx_ring)
3411{ 3415{
@@ -3425,10 +3429,10 @@ void igb_free_tx_resources(struct igb_ring *tx_ring)
3425} 3429}
3426 3430
3427/** 3431/**
3428 * igb_free_all_tx_resources - Free Tx Resources for All Queues 3432 * igb_free_all_tx_resources - Free Tx Resources for All Queues
3429 * @adapter: board private structure 3433 * @adapter: board private structure
3430 * 3434 *
3431 * Free all transmit software resources 3435 * Free all transmit software resources
3432 **/ 3436 **/
3433static void igb_free_all_tx_resources(struct igb_adapter *adapter) 3437static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3434{ 3438{
@@ -3461,8 +3465,8 @@ void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
3461} 3465}
3462 3466
3463/** 3467/**
3464 * igb_clean_tx_ring - Free Tx Buffers 3468 * igb_clean_tx_ring - Free Tx Buffers
3465 * @tx_ring: ring to be cleaned 3469 * @tx_ring: ring to be cleaned
3466 **/ 3470 **/
3467static void igb_clean_tx_ring(struct igb_ring *tx_ring) 3471static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3468{ 3472{
@@ -3492,8 +3496,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
3492} 3496}
3493 3497
3494/** 3498/**
3495 * igb_clean_all_tx_rings - Free Tx Buffers for all queues 3499 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
3496 * @adapter: board private structure 3500 * @adapter: board private structure
3497 **/ 3501 **/
3498static void igb_clean_all_tx_rings(struct igb_adapter *adapter) 3502static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3499{ 3503{
@@ -3504,10 +3508,10 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3504} 3508}
3505 3509
3506/** 3510/**
3507 * igb_free_rx_resources - Free Rx Resources 3511 * igb_free_rx_resources - Free Rx Resources
3508 * @rx_ring: ring to clean the resources from 3512 * @rx_ring: ring to clean the resources from
3509 * 3513 *
3510 * Free all receive software resources 3514 * Free all receive software resources
3511 **/ 3515 **/
3512void igb_free_rx_resources(struct igb_ring *rx_ring) 3516void igb_free_rx_resources(struct igb_ring *rx_ring)
3513{ 3517{
@@ -3527,10 +3531,10 @@ void igb_free_rx_resources(struct igb_ring *rx_ring)
3527} 3531}
3528 3532
3529/** 3533/**
3530 * igb_free_all_rx_resources - Free Rx Resources for All Queues 3534 * igb_free_all_rx_resources - Free Rx Resources for All Queues
3531 * @adapter: board private structure 3535 * @adapter: board private structure
3532 * 3536 *
3533 * Free all receive software resources 3537 * Free all receive software resources
3534 **/ 3538 **/
3535static void igb_free_all_rx_resources(struct igb_adapter *adapter) 3539static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3536{ 3540{
@@ -3541,8 +3545,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3541} 3545}
3542 3546
3543/** 3547/**
3544 * igb_clean_rx_ring - Free Rx Buffers per Queue 3548 * igb_clean_rx_ring - Free Rx Buffers per Queue
3545 * @rx_ring: ring to free buffers from 3549 * @rx_ring: ring to free buffers from
3546 **/ 3550 **/
3547static void igb_clean_rx_ring(struct igb_ring *rx_ring) 3551static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3548{ 3552{
@@ -3584,8 +3588,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
3584} 3588}
3585 3589
3586/** 3590/**
3587 * igb_clean_all_rx_rings - Free Rx Buffers for all queues 3591 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
3588 * @adapter: board private structure 3592 * @adapter: board private structure
3589 **/ 3593 **/
3590static void igb_clean_all_rx_rings(struct igb_adapter *adapter) 3594static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3591{ 3595{
@@ -3596,11 +3600,11 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3596} 3600}
3597 3601
3598/** 3602/**
3599 * igb_set_mac - Change the Ethernet Address of the NIC 3603 * igb_set_mac - Change the Ethernet Address of the NIC
3600 * @netdev: network interface device structure 3604 * @netdev: network interface device structure
3601 * @p: pointer to an address structure 3605 * @p: pointer to an address structure
3602 * 3606 *
3603 * Returns 0 on success, negative on failure 3607 * Returns 0 on success, negative on failure
3604 **/ 3608 **/
3605static int igb_set_mac(struct net_device *netdev, void *p) 3609static int igb_set_mac(struct net_device *netdev, void *p)
3606{ 3610{
@@ -3616,19 +3620,19 @@ static int igb_set_mac(struct net_device *netdev, void *p)
3616 3620
3617 /* set the correct pool for the new PF MAC address in entry 0 */ 3621 /* set the correct pool for the new PF MAC address in entry 0 */
3618 igb_rar_set_qsel(adapter, hw->mac.addr, 0, 3622 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
3619 adapter->vfs_allocated_count); 3623 adapter->vfs_allocated_count);
3620 3624
3621 return 0; 3625 return 0;
3622} 3626}
3623 3627
3624/** 3628/**
3625 * igb_write_mc_addr_list - write multicast addresses to MTA 3629 * igb_write_mc_addr_list - write multicast addresses to MTA
3626 * @netdev: network interface device structure 3630 * @netdev: network interface device structure
3627 * 3631 *
3628 * Writes multicast address list to the MTA hash table. 3632 * Writes multicast address list to the MTA hash table.
3629 * Returns: -ENOMEM on failure 3633 * Returns: -ENOMEM on failure
3630 * 0 on no addresses written 3634 * 0 on no addresses written
3631 * X on writing X addresses to MTA 3635 * X on writing X addresses to MTA
3632 **/ 3636 **/
3633static int igb_write_mc_addr_list(struct net_device *netdev) 3637static int igb_write_mc_addr_list(struct net_device *netdev)
3634{ 3638{
@@ -3661,13 +3665,13 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
3661} 3665}
3662 3666
3663/** 3667/**
3664 * igb_write_uc_addr_list - write unicast addresses to RAR table 3668 * igb_write_uc_addr_list - write unicast addresses to RAR table
3665 * @netdev: network interface device structure 3669 * @netdev: network interface device structure
3666 * 3670 *
3667 * Writes unicast address list to the RAR table. 3671 * Writes unicast address list to the RAR table.
3668 * Returns: -ENOMEM on failure/insufficient address space 3672 * Returns: -ENOMEM on failure/insufficient address space
3669 * 0 on no addresses written 3673 * 0 on no addresses written
3670 * X on writing X addresses to the RAR table 3674 * X on writing X addresses to the RAR table
3671 **/ 3675 **/
3672static int igb_write_uc_addr_list(struct net_device *netdev) 3676static int igb_write_uc_addr_list(struct net_device *netdev)
3673{ 3677{
@@ -3688,8 +3692,8 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3688 if (!rar_entries) 3692 if (!rar_entries)
3689 break; 3693 break;
3690 igb_rar_set_qsel(adapter, ha->addr, 3694 igb_rar_set_qsel(adapter, ha->addr,
3691 rar_entries--, 3695 rar_entries--,
3692 vfn); 3696 vfn);
3693 count++; 3697 count++;
3694 } 3698 }
3695 } 3699 }
@@ -3704,13 +3708,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
3704} 3708}
3705 3709
3706/** 3710/**
3707 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 3711 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
3708 * @netdev: network interface device structure 3712 * @netdev: network interface device structure
3709 * 3713 *
3710 * The set_rx_mode entry point is called whenever the unicast or multicast 3714 * The set_rx_mode entry point is called whenever the unicast or multicast
3711 * address lists or the network interface flags are updated. This routine is 3715 * address lists or the network interface flags are updated. This routine is
3712 * responsible for configuring the hardware for proper unicast, multicast, 3716 * responsible for configuring the hardware for proper unicast, multicast,
3713 * promiscuous mode, and all-multi behavior. 3717 * promiscuous mode, and all-multi behavior.
3714 **/ 3718 **/
3715static void igb_set_rx_mode(struct net_device *netdev) 3719static void igb_set_rx_mode(struct net_device *netdev)
3716{ 3720{
@@ -3734,8 +3738,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3734 rctl |= E1000_RCTL_MPE; 3738 rctl |= E1000_RCTL_MPE;
3735 vmolr |= E1000_VMOLR_MPME; 3739 vmolr |= E1000_VMOLR_MPME;
3736 } else { 3740 } else {
3737 /* 3741 /* Write addresses to the MTA, if the attempt fails
3738 * Write addresses to the MTA, if the attempt fails
3739 * then we should just turn on promiscuous mode so 3742 * then we should just turn on promiscuous mode so
3740 * that we can at least receive multicast traffic 3743 * that we can at least receive multicast traffic
3741 */ 3744 */
@@ -3747,8 +3750,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3747 vmolr |= E1000_VMOLR_ROMPE; 3750 vmolr |= E1000_VMOLR_ROMPE;
3748 } 3751 }
3749 } 3752 }
3750 /* 3753 /* Write addresses to available RAR registers, if there is not
3751 * Write addresses to available RAR registers, if there is not
3752 * sufficient space to store all the addresses then enable 3754 * sufficient space to store all the addresses then enable
3753 * unicast promiscuous mode 3755 * unicast promiscuous mode
3754 */ 3756 */
@@ -3761,8 +3763,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3761 } 3763 }
3762 wr32(E1000_RCTL, rctl); 3764 wr32(E1000_RCTL, rctl);
3763 3765
3764 /* 3766 /* In order to support SR-IOV and eventually VMDq it is necessary to set
3765 * In order to support SR-IOV and eventually VMDq it is necessary to set
3766 * the VMOLR to enable the appropriate modes. Without this workaround 3767 * the VMOLR to enable the appropriate modes. Without this workaround
3767 * we will have issues with VLAN tag stripping not being done for frames 3768 * we will have issues with VLAN tag stripping not being done for frames
3768 * that are only arriving because we are the default pool 3769 * that are only arriving because we are the default pool
@@ -3771,7 +3772,7 @@ static void igb_set_rx_mode(struct net_device *netdev)
3771 return; 3772 return;
3772 3773
3773 vmolr |= rd32(E1000_VMOLR(vfn)) & 3774 vmolr |= rd32(E1000_VMOLR(vfn)) &
3774 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE); 3775 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
3775 wr32(E1000_VMOLR(vfn), vmolr); 3776 wr32(E1000_VMOLR(vfn), vmolr);
3776 igb_restore_vf_multicasts(adapter); 3777 igb_restore_vf_multicasts(adapter);
3777} 3778}
@@ -3816,7 +3817,8 @@ static void igb_spoof_check(struct igb_adapter *adapter)
3816} 3817}
3817 3818
3818/* Need to wait a few seconds after link up to get diagnostic information from 3819/* Need to wait a few seconds after link up to get diagnostic information from
3819 * the phy */ 3820 * the phy
3821 */
3820static void igb_update_phy_info(unsigned long data) 3822static void igb_update_phy_info(unsigned long data)
3821{ 3823{
3822 struct igb_adapter *adapter = (struct igb_adapter *) data; 3824 struct igb_adapter *adapter = (struct igb_adapter *) data;
@@ -3824,8 +3826,8 @@ static void igb_update_phy_info(unsigned long data)
3824} 3826}
3825 3827
3826/** 3828/**
3827 * igb_has_link - check shared code for link and determine up/down 3829 * igb_has_link - check shared code for link and determine up/down
3828 * @adapter: pointer to driver private info 3830 * @adapter: pointer to driver private info
3829 **/ 3831 **/
3830bool igb_has_link(struct igb_adapter *adapter) 3832bool igb_has_link(struct igb_adapter *adapter)
3831{ 3833{
@@ -3878,8 +3880,8 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
3878} 3880}
3879 3881
3880/** 3882/**
3881 * igb_watchdog - Timer Call-back 3883 * igb_watchdog - Timer Call-back
3882 * @data: pointer to adapter cast into an unsigned long 3884 * @data: pointer to adapter cast into an unsigned long
3883 **/ 3885 **/
3884static void igb_watchdog(unsigned long data) 3886static void igb_watchdog(unsigned long data)
3885{ 3887{
@@ -3891,8 +3893,8 @@ static void igb_watchdog(unsigned long data)
3891static void igb_watchdog_task(struct work_struct *work) 3893static void igb_watchdog_task(struct work_struct *work)
3892{ 3894{
3893 struct igb_adapter *adapter = container_of(work, 3895 struct igb_adapter *adapter = container_of(work,
3894 struct igb_adapter, 3896 struct igb_adapter,
3895 watchdog_task); 3897 watchdog_task);
3896 struct e1000_hw *hw = &adapter->hw; 3898 struct e1000_hw *hw = &adapter->hw;
3897 struct net_device *netdev = adapter->netdev; 3899 struct net_device *netdev = adapter->netdev;
3898 u32 link; 3900 u32 link;
@@ -3906,8 +3908,8 @@ static void igb_watchdog_task(struct work_struct *work)
3906 if (!netif_carrier_ok(netdev)) { 3908 if (!netif_carrier_ok(netdev)) {
3907 u32 ctrl; 3909 u32 ctrl;
3908 hw->mac.ops.get_speed_and_duplex(hw, 3910 hw->mac.ops.get_speed_and_duplex(hw,
3909 &adapter->link_speed, 3911 &adapter->link_speed,
3910 &adapter->link_duplex); 3912 &adapter->link_duplex);
3911 3913
3912 ctrl = rd32(E1000_CTRL); 3914 ctrl = rd32(E1000_CTRL);
3913 /* Links status message must follow this format */ 3915 /* Links status message must follow this format */
@@ -3990,7 +3992,8 @@ static void igb_watchdog_task(struct work_struct *work)
3990 /* We've lost link, so the controller stops DMA, 3992 /* We've lost link, so the controller stops DMA,
3991 * but we've got queued Tx work that's never going 3993 * but we've got queued Tx work that's never going
3992 * to get done, so reset controller to flush Tx. 3994 * to get done, so reset controller to flush Tx.
3993 * (Do the reset outside of interrupt context). */ 3995 * (Do the reset outside of interrupt context).
3996 */
3994 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) { 3997 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
3995 adapter->tx_timeout_count++; 3998 adapter->tx_timeout_count++;
3996 schedule_work(&adapter->reset_task); 3999 schedule_work(&adapter->reset_task);
@@ -4003,7 +4006,7 @@ static void igb_watchdog_task(struct work_struct *work)
4003 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 4006 set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
4004 } 4007 }
4005 4008
4006 /* Cause software interrupt to ensure rx ring is cleaned */ 4009 /* Cause software interrupt to ensure Rx ring is cleaned */
4007 if (adapter->msix_entries) { 4010 if (adapter->msix_entries) {
4008 u32 eics = 0; 4011 u32 eics = 0;
4009 for (i = 0; i < adapter->num_q_vectors; i++) 4012 for (i = 0; i < adapter->num_q_vectors; i++)
@@ -4030,20 +4033,20 @@ enum latency_range {
4030}; 4033};
4031 4034
4032/** 4035/**
4033 * igb_update_ring_itr - update the dynamic ITR value based on packet size 4036 * igb_update_ring_itr - update the dynamic ITR value based on packet size
4037 * @q_vector: pointer to q_vector
4034 * 4038 *
4035 * Stores a new ITR value based on strictly on packet size. This 4039 * Stores a new ITR value based on strictly on packet size. This
4036 * algorithm is less sophisticated than that used in igb_update_itr, 4040 * algorithm is less sophisticated than that used in igb_update_itr,
4037 * due to the difficulty of synchronizing statistics across multiple 4041 * due to the difficulty of synchronizing statistics across multiple
4038 * receive rings. The divisors and thresholds used by this function 4042 * receive rings. The divisors and thresholds used by this function
4039 * were determined based on theoretical maximum wire speed and testing 4043 * were determined based on theoretical maximum wire speed and testing
4040 * data, in order to minimize response time while increasing bulk 4044 * data, in order to minimize response time while increasing bulk
4041 * throughput. 4045 * throughput.
4042 * This functionality is controlled by the InterruptThrottleRate module 4046 * This functionality is controlled by the InterruptThrottleRate module
4043 * parameter (see igb_param.c) 4047 * parameter (see igb_param.c)
4044 * NOTE: This function is called only when operating in a multiqueue 4048 * NOTE: This function is called only when operating in a multiqueue
4045 * receive environment. 4049 * receive environment.
4046 * @q_vector: pointer to q_vector
4047 **/ 4050 **/
4048static void igb_update_ring_itr(struct igb_q_vector *q_vector) 4051static void igb_update_ring_itr(struct igb_q_vector *q_vector)
4049{ 4052{
@@ -4104,20 +4107,21 @@ clear_counts:
4104} 4107}
4105 4108
4106/** 4109/**
4107 * igb_update_itr - update the dynamic ITR value based on statistics 4110 * igb_update_itr - update the dynamic ITR value based on statistics
4108 * Stores a new ITR value based on packets and byte 4111 * @q_vector: pointer to q_vector
4109 * counts during the last interrupt. The advantage of per interrupt 4112 * @ring_container: ring info to update the itr for
4110 * computation is faster updates and more accurate ITR for the current 4113 *
4111 * traffic pattern. Constants in this function were computed 4114 * Stores a new ITR value based on packets and byte
4112 * based on theoretical maximum wire speed and thresholds were set based 4115 * counts during the last interrupt. The advantage of per interrupt
4113 * on testing data as well as attempting to minimize response time 4116 * computation is faster updates and more accurate ITR for the current
4114 * while increasing bulk throughput. 4117 * traffic pattern. Constants in this function were computed
4115 * this functionality is controlled by the InterruptThrottleRate module 4118 * based on theoretical maximum wire speed and thresholds were set based
4116 * parameter (see igb_param.c) 4119 * on testing data as well as attempting to minimize response time
4117 * NOTE: These calculations are only valid when operating in a single- 4120 * while increasing bulk throughput.
4118 * queue environment. 4121 * this functionality is controlled by the InterruptThrottleRate module
4119 * @q_vector: pointer to q_vector 4122 * parameter (see igb_param.c)
4120 * @ring_container: ring info to update the itr for 4123 * NOTE: These calculations are only valid when operating in a single-
4124 * queue environment.
4121 **/ 4125 **/
4122static void igb_update_itr(struct igb_q_vector *q_vector, 4126static void igb_update_itr(struct igb_q_vector *q_vector,
4123 struct igb_ring_container *ring_container) 4127 struct igb_ring_container *ring_container)
@@ -4215,12 +4219,12 @@ set_itr_now:
4215 if (new_itr != q_vector->itr_val) { 4219 if (new_itr != q_vector->itr_val) {
4216 /* this attempts to bias the interrupt rate towards Bulk 4220 /* this attempts to bias the interrupt rate towards Bulk
4217 * by adding intermediate steps when interrupt rate is 4221 * by adding intermediate steps when interrupt rate is
4218 * increasing */ 4222 * increasing
4223 */
4219 new_itr = new_itr > q_vector->itr_val ? 4224 new_itr = new_itr > q_vector->itr_val ?
4220 max((new_itr * q_vector->itr_val) / 4225 max((new_itr * q_vector->itr_val) /
4221 (new_itr + (q_vector->itr_val >> 2)), 4226 (new_itr + (q_vector->itr_val >> 2)),
4222 new_itr) : 4227 new_itr) : new_itr;
4223 new_itr;
4224 /* Don't write the value here; it resets the adapter's 4228 /* Don't write the value here; it resets the adapter's
4225 * internal timer, and causes us to delay far longer than 4229 * internal timer, and causes us to delay far longer than
4226 * we should between interrupts. Instead, we write the ITR 4230 * we should between interrupts. Instead, we write the ITR
@@ -4347,8 +4351,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4347 default: 4351 default:
4348 if (unlikely(net_ratelimit())) { 4352 if (unlikely(net_ratelimit())) {
4349 dev_warn(tx_ring->dev, 4353 dev_warn(tx_ring->dev,
4350 "partial checksum but proto=%x!\n", 4354 "partial checksum but proto=%x!\n",
4351 first->protocol); 4355 first->protocol);
4352 } 4356 }
4353 break; 4357 break;
4354 } 4358 }
@@ -4371,8 +4375,8 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
4371 default: 4375 default:
4372 if (unlikely(net_ratelimit())) { 4376 if (unlikely(net_ratelimit())) {
4373 dev_warn(tx_ring->dev, 4377 dev_warn(tx_ring->dev,
4374 "partial checksum but l4 proto=%x!\n", 4378 "partial checksum but l4 proto=%x!\n",
4375 l4_hdr); 4379 l4_hdr);
4376 } 4380 }
4377 break; 4381 break;
4378 } 4382 }
@@ -4524,8 +4528,7 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4524 /* set the timestamp */ 4528 /* set the timestamp */
4525 first->time_stamp = jiffies; 4529 first->time_stamp = jiffies;
4526 4530
4527 /* 4531 /* Force memory writes to complete before letting h/w know there
4528 * Force memory writes to complete before letting h/w know there
4529 * are new descriptors to fetch. (Only applicable for weak-ordered 4532 * are new descriptors to fetch. (Only applicable for weak-ordered
4530 * memory model archs, such as IA-64). 4533 * memory model archs, such as IA-64).
4531 * 4534 *
@@ -4546,7 +4549,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
4546 writel(i, tx_ring->tail); 4549 writel(i, tx_ring->tail);
4547 4550
4548 /* we need this if more than one processor can write to our tail 4551 /* we need this if more than one processor can write to our tail
4549 * at a time, it syncronizes IO on IA64/Altix systems */ 4552 * at a time, it synchronizes IO on IA64/Altix systems
4553 */
4550 mmiowb(); 4554 mmiowb();
4551 4555
4552 return; 4556 return;
@@ -4576,11 +4580,13 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
4576 4580
4577 /* Herbert's original patch had: 4581 /* Herbert's original patch had:
4578 * smp_mb__after_netif_stop_queue(); 4582 * smp_mb__after_netif_stop_queue();
4579 * but since that doesn't exist yet, just open code it. */ 4583 * but since that doesn't exist yet, just open code it.
4584 */
4580 smp_mb(); 4585 smp_mb();
4581 4586
4582 /* We need to check again in a case another CPU has just 4587 /* We need to check again in a case another CPU has just
4583 * made room available. */ 4588 * made room available.
4589 */
4584 if (igb_desc_unused(tx_ring) < size) 4590 if (igb_desc_unused(tx_ring) < size)
4585 return -EBUSY; 4591 return -EBUSY;
4586 4592
@@ -4706,8 +4712,7 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4706 return NETDEV_TX_OK; 4712 return NETDEV_TX_OK;
4707 } 4713 }
4708 4714
4709 /* 4715 /* The minimum packet size with TCTL.PSP set is 17 so pad the skb
4710 * The minimum packet size with TCTL.PSP set is 17 so pad the skb
4711 * in order to meet this minimum size requirement. 4716 * in order to meet this minimum size requirement.
4712 */ 4717 */
4713 if (unlikely(skb->len < 17)) { 4718 if (unlikely(skb->len < 17)) {
@@ -4721,8 +4726,8 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
4721} 4726}
4722 4727
4723/** 4728/**
4724 * igb_tx_timeout - Respond to a Tx Hang 4729 * igb_tx_timeout - Respond to a Tx Hang
4725 * @netdev: network interface device structure 4730 * @netdev: network interface device structure
4726 **/ 4731 **/
4727static void igb_tx_timeout(struct net_device *netdev) 4732static void igb_tx_timeout(struct net_device *netdev)
4728{ 4733{
@@ -4751,13 +4756,12 @@ static void igb_reset_task(struct work_struct *work)
4751} 4756}
4752 4757
4753/** 4758/**
4754 * igb_get_stats64 - Get System Network Statistics 4759 * igb_get_stats64 - Get System Network Statistics
4755 * @netdev: network interface device structure 4760 * @netdev: network interface device structure
4756 * @stats: rtnl_link_stats64 pointer 4761 * @stats: rtnl_link_stats64 pointer
4757 *
4758 **/ 4762 **/
4759static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev, 4763static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4760 struct rtnl_link_stats64 *stats) 4764 struct rtnl_link_stats64 *stats)
4761{ 4765{
4762 struct igb_adapter *adapter = netdev_priv(netdev); 4766 struct igb_adapter *adapter = netdev_priv(netdev);
4763 4767
@@ -4770,11 +4774,11 @@ static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
4770} 4774}
4771 4775
4772/** 4776/**
4773 * igb_change_mtu - Change the Maximum Transfer Unit 4777 * igb_change_mtu - Change the Maximum Transfer Unit
4774 * @netdev: network interface device structure 4778 * @netdev: network interface device structure
4775 * @new_mtu: new value for maximum frame size 4779 * @new_mtu: new value for maximum frame size
4776 * 4780 *
4777 * Returns 0 on success, negative on failure 4781 * Returns 0 on success, negative on failure
4778 **/ 4782 **/
4779static int igb_change_mtu(struct net_device *netdev, int new_mtu) 4783static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4780{ 4784{
@@ -4817,10 +4821,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
4817} 4821}
4818 4822
4819/** 4823/**
4820 * igb_update_stats - Update the board statistics counters 4824 * igb_update_stats - Update the board statistics counters
4821 * @adapter: board private structure 4825 * @adapter: board private structure
4822 **/ 4826 **/
4823
4824void igb_update_stats(struct igb_adapter *adapter, 4827void igb_update_stats(struct igb_adapter *adapter,
4825 struct rtnl_link_stats64 *net_stats) 4828 struct rtnl_link_stats64 *net_stats)
4826{ 4829{
@@ -4835,8 +4838,7 @@ void igb_update_stats(struct igb_adapter *adapter,
4835 4838
4836#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 4839#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
4837 4840
4838 /* 4841 /* Prevent stats update while adapter is being reset, or if the pci
4839 * Prevent stats update while adapter is being reset, or if the pci
4840 * connection is down. 4842 * connection is down.
4841 */ 4843 */
4842 if (adapter->link_speed == 0) 4844 if (adapter->link_speed == 0)
@@ -4970,7 +4972,8 @@ void igb_update_stats(struct igb_adapter *adapter,
4970 /* Rx Errors */ 4972 /* Rx Errors */
4971 4973
4972 /* RLEC on some newer hardware can be incorrect so build 4974 /* RLEC on some newer hardware can be incorrect so build
4973 * our own version based on RUC and ROC */ 4975 * our own version based on RUC and ROC
4976 */
4974 net_stats->rx_errors = adapter->stats.rxerrc + 4977 net_stats->rx_errors = adapter->stats.rxerrc +
4975 adapter->stats.crcerrs + adapter->stats.algnerrc + 4978 adapter->stats.crcerrs + adapter->stats.algnerrc +
4976 adapter->stats.ruc + adapter->stats.roc + 4979 adapter->stats.ruc + adapter->stats.roc +
@@ -5029,7 +5032,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
5029 adapter->stats.doosync++; 5032 adapter->stats.doosync++;
5030 /* The DMA Out of Sync is also indication of a spoof event 5033 /* The DMA Out of Sync is also indication of a spoof event
5031 * in IOV mode. Check the Wrong VM Behavior register to 5034 * in IOV mode. Check the Wrong VM Behavior register to
5032 * see if it is really a spoof event. */ 5035 * see if it is really a spoof event.
5036 */
5033 igb_check_wvbr(adapter); 5037 igb_check_wvbr(adapter);
5034 } 5038 }
5035 5039
@@ -5103,8 +5107,7 @@ static void igb_update_tx_dca(struct igb_adapter *adapter,
5103 if (hw->mac.type != e1000_82575) 5107 if (hw->mac.type != e1000_82575)
5104 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT; 5108 txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;
5105 5109
5106 /* 5110 /* We can enable relaxed ordering for reads, but not writes when
5107 * We can enable relaxed ordering for reads, but not writes when
5108 * DCA is enabled. This is due to a known issue in some chipsets 5111 * DCA is enabled. This is due to a known issue in some chipsets
5109 * which will cause the DCA tag to be cleared. 5112 * which will cause the DCA tag to be cleared.
5110 */ 5113 */
@@ -5125,8 +5128,7 @@ static void igb_update_rx_dca(struct igb_adapter *adapter,
5125 if (hw->mac.type != e1000_82575) 5128 if (hw->mac.type != e1000_82575)
5126 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT; 5129 rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;
5127 5130
5128 /* 5131 /* We can enable relaxed ordering for reads, but not writes when
5129 * We can enable relaxed ordering for reads, but not writes when
5130 * DCA is enabled. This is due to a known issue in some chipsets 5132 * DCA is enabled. This is due to a known issue in some chipsets
5131 * which will cause the DCA tag to be cleared. 5133 * which will cause the DCA tag to be cleared.
5132 */ 5134 */
@@ -5195,7 +5197,8 @@ static int __igb_notify_dca(struct device *dev, void *data)
5195 case DCA_PROVIDER_REMOVE: 5197 case DCA_PROVIDER_REMOVE:
5196 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 5198 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
5197 /* without this a class_device is left 5199 /* without this a class_device is left
5198 * hanging around in the sysfs model */ 5200 * hanging around in the sysfs model
5201 */
5199 dca_remove_requester(dev); 5202 dca_remove_requester(dev);
5200 dev_info(&pdev->dev, "DCA disabled\n"); 5203 dev_info(&pdev->dev, "DCA disabled\n");
5201 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 5204 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
@@ -5208,12 +5211,12 @@ static int __igb_notify_dca(struct device *dev, void *data)
5208} 5211}
5209 5212
5210static int igb_notify_dca(struct notifier_block *nb, unsigned long event, 5213static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
5211 void *p) 5214 void *p)
5212{ 5215{
5213 int ret_val; 5216 int ret_val;
5214 5217
5215 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event, 5218 ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
5216 __igb_notify_dca); 5219 __igb_notify_dca);
5217 5220
5218 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 5221 return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
5219} 5222}
@@ -5285,7 +5288,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5285 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 5288 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
5286 5289
5287 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC | 5290 vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
5288 IGB_VF_FLAG_MULTI_PROMISC); 5291 IGB_VF_FLAG_MULTI_PROMISC);
5289 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME); 5292 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
5290 5293
5291 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) { 5294 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
@@ -5293,8 +5296,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5293 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC; 5296 vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
5294 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST; 5297 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
5295 } else { 5298 } else {
5296 /* 5299 /* if we have hashes and we are clearing a multicast promisc
5297 * if we have hashes and we are clearing a multicast promisc
5298 * flag we need to write the hashes to the MTA as this step 5300 * flag we need to write the hashes to the MTA as this step
5299 * was previously skipped 5301 * was previously skipped
5300 */ 5302 */
@@ -5315,7 +5317,6 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
5315 return -EINVAL; 5317 return -EINVAL;
5316 5318
5317 return 0; 5319 return 0;
5318
5319} 5320}
5320 5321
5321static int igb_set_vf_multicasts(struct igb_adapter *adapter, 5322static int igb_set_vf_multicasts(struct igb_adapter *adapter,
@@ -5522,22 +5523,20 @@ static int igb_ndo_set_vf_vlan(struct net_device *netdev,
5522 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); 5523 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
5523 if (test_bit(__IGB_DOWN, &adapter->state)) { 5524 if (test_bit(__IGB_DOWN, &adapter->state)) {
5524 dev_warn(&adapter->pdev->dev, 5525 dev_warn(&adapter->pdev->dev,
5525 "The VF VLAN has been set," 5526 "The VF VLAN has been set, but the PF device is not up.\n");
5526 " but the PF device is not up.\n");
5527 dev_warn(&adapter->pdev->dev, 5527 dev_warn(&adapter->pdev->dev,
5528 "Bring the PF device up before" 5528 "Bring the PF device up before attempting to use the VF device.\n");
5529 " attempting to use the VF device.\n");
5530 } 5529 }
5531 } else { 5530 } else {
5532 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan, 5531 igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
5533 false, vf); 5532 false, vf);
5534 igb_set_vmvir(adapter, vlan, vf); 5533 igb_set_vmvir(adapter, vlan, vf);
5535 igb_set_vmolr(adapter, vf, true); 5534 igb_set_vmolr(adapter, vf, true);
5536 adapter->vf_data[vf].pf_vlan = 0; 5535 adapter->vf_data[vf].pf_vlan = 0;
5537 adapter->vf_data[vf].pf_qos = 0; 5536 adapter->vf_data[vf].pf_qos = 0;
5538 } 5537 }
5539out: 5538out:
5540 return err; 5539 return err;
5541} 5540}
5542 5541
5543static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 5542static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -5615,8 +5614,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5615 5614
5616static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 5615static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
5617{ 5616{
5618 /* 5617 /* The VF MAC Address is stored in a packed array of bytes
5619 * The VF MAC Address is stored in a packed array of bytes
5620 * starting at the second 32 bit word of the msg array 5618 * starting at the second 32 bit word of the msg array
5621 */ 5619 */
5622 unsigned char *addr = (char *)&msg[1]; 5620 unsigned char *addr = (char *)&msg[1];
@@ -5665,11 +5663,9 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5665 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 5663 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
5666 return; 5664 return;
5667 5665
5668 /* 5666 /* until the vf completes a reset it should not be
5669 * until the vf completes a reset it should not be
5670 * allowed to start any configuration. 5667 * allowed to start any configuration.
5671 */ 5668 */
5672
5673 if (msgbuf[0] == E1000_VF_RESET) { 5669 if (msgbuf[0] == E1000_VF_RESET) {
5674 igb_vf_reset_msg(adapter, vf); 5670 igb_vf_reset_msg(adapter, vf);
5675 return; 5671 return;
@@ -5689,9 +5685,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5689 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 5685 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5690 else 5686 else
5691 dev_warn(&pdev->dev, 5687 dev_warn(&pdev->dev,
5692 "VF %d attempted to override administratively " 5688 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
5693 "set MAC address\nReload the VF driver to " 5689 vf);
5694 "resume operations\n", vf);
5695 break; 5690 break;
5696 case E1000_VF_SET_PROMISC: 5691 case E1000_VF_SET_PROMISC:
5697 retval = igb_set_vf_promisc(adapter, msgbuf, vf); 5692 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5706,9 +5701,8 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5706 retval = -1; 5701 retval = -1;
5707 if (vf_data->pf_vlan) 5702 if (vf_data->pf_vlan)
5708 dev_warn(&pdev->dev, 5703 dev_warn(&pdev->dev,
5709 "VF %d attempted to override administratively " 5704 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
5710 "set VLAN tag\nReload the VF driver to " 5705 vf);
5711 "resume operations\n", vf);
5712 else 5706 else
5713 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 5707 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5714 break; 5708 break;
@@ -5777,9 +5771,9 @@ static void igb_set_uta(struct igb_adapter *adapter)
5777} 5771}
5778 5772
5779/** 5773/**
5780 * igb_intr_msi - Interrupt Handler 5774 * igb_intr_msi - Interrupt Handler
5781 * @irq: interrupt number 5775 * @irq: interrupt number
5782 * @data: pointer to a network interface device structure 5776 * @data: pointer to a network interface device structure
5783 **/ 5777 **/
5784static irqreturn_t igb_intr_msi(int irq, void *data) 5778static irqreturn_t igb_intr_msi(int irq, void *data)
5785{ 5779{
@@ -5822,9 +5816,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
5822} 5816}
5823 5817
5824/** 5818/**
5825 * igb_intr - Legacy Interrupt Handler 5819 * igb_intr - Legacy Interrupt Handler
5826 * @irq: interrupt number 5820 * @irq: interrupt number
5827 * @data: pointer to a network interface device structure 5821 * @data: pointer to a network interface device structure
5828 **/ 5822 **/
5829static irqreturn_t igb_intr(int irq, void *data) 5823static irqreturn_t igb_intr(int irq, void *data)
5830{ 5824{
@@ -5832,11 +5826,13 @@ static irqreturn_t igb_intr(int irq, void *data)
5832 struct igb_q_vector *q_vector = adapter->q_vector[0]; 5826 struct igb_q_vector *q_vector = adapter->q_vector[0];
5833 struct e1000_hw *hw = &adapter->hw; 5827 struct e1000_hw *hw = &adapter->hw;
5834 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 5828 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
5835 * need for the IMC write */ 5829 * need for the IMC write
5830 */
5836 u32 icr = rd32(E1000_ICR); 5831 u32 icr = rd32(E1000_ICR);
5837 5832
5838 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 5833 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
5839 * not set, then the adapter didn't send an interrupt */ 5834 * not set, then the adapter didn't send an interrupt
5835 */
5840 if (!(icr & E1000_ICR_INT_ASSERTED)) 5836 if (!(icr & E1000_ICR_INT_ASSERTED))
5841 return IRQ_NONE; 5837 return IRQ_NONE;
5842 5838
@@ -5895,15 +5891,15 @@ static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
5895} 5891}
5896 5892
5897/** 5893/**
5898 * igb_poll - NAPI Rx polling callback 5894 * igb_poll - NAPI Rx polling callback
5899 * @napi: napi polling structure 5895 * @napi: napi polling structure
5900 * @budget: count of how many packets we should handle 5896 * @budget: count of how many packets we should handle
5901 **/ 5897 **/
5902static int igb_poll(struct napi_struct *napi, int budget) 5898static int igb_poll(struct napi_struct *napi, int budget)
5903{ 5899{
5904 struct igb_q_vector *q_vector = container_of(napi, 5900 struct igb_q_vector *q_vector = container_of(napi,
5905 struct igb_q_vector, 5901 struct igb_q_vector,
5906 napi); 5902 napi);
5907 bool clean_complete = true; 5903 bool clean_complete = true;
5908 5904
5909#ifdef CONFIG_IGB_DCA 5905#ifdef CONFIG_IGB_DCA
@@ -5928,10 +5924,10 @@ static int igb_poll(struct napi_struct *napi, int budget)
5928} 5924}
5929 5925
5930/** 5926/**
5931 * igb_clean_tx_irq - Reclaim resources after transmit completes 5927 * igb_clean_tx_irq - Reclaim resources after transmit completes
5932 * @q_vector: pointer to q_vector containing needed info 5928 * @q_vector: pointer to q_vector containing needed info
5933 * 5929 *
5934 * returns true if ring is completely cleaned 5930 * returns true if ring is completely cleaned
5935 **/ 5931 **/
5936static bool igb_clean_tx_irq(struct igb_q_vector *q_vector) 5932static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
5937{ 5933{
@@ -6037,7 +6033,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6037 struct e1000_hw *hw = &adapter->hw; 6033 struct e1000_hw *hw = &adapter->hw;
6038 6034
6039 /* Detect a transmit hang in hardware, this serializes the 6035 /* Detect a transmit hang in hardware, this serializes the
6040 * check with the clearing of time_stamp and movement of i */ 6036 * check with the clearing of time_stamp and movement of i
6037 */
6041 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); 6038 clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
6042 if (tx_buffer->next_to_watch && 6039 if (tx_buffer->next_to_watch &&
6043 time_after(jiffies, tx_buffer->time_stamp + 6040 time_after(jiffies, tx_buffer->time_stamp +
@@ -6076,8 +6073,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6076 6073
6077#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 6074#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
6078 if (unlikely(total_packets && 6075 if (unlikely(total_packets &&
6079 netif_carrier_ok(tx_ring->netdev) && 6076 netif_carrier_ok(tx_ring->netdev) &&
6080 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { 6077 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
6081 /* Make sure that anybody stopping the queue after this 6078 /* Make sure that anybody stopping the queue after this
6082 * sees the new next_to_clean. 6079 * sees the new next_to_clean.
6083 */ 6080 */
@@ -6098,11 +6095,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
6098} 6095}
6099 6096
6100/** 6097/**
6101 * igb_reuse_rx_page - page flip buffer and store it back on the ring 6098 * igb_reuse_rx_page - page flip buffer and store it back on the ring
6102 * @rx_ring: rx descriptor ring to store buffers on 6099 * @rx_ring: rx descriptor ring to store buffers on
6103 * @old_buff: donor buffer to have page reused 6100 * @old_buff: donor buffer to have page reused
6104 * 6101 *
6105 * Synchronizes page for reuse by the adapter 6102 * Synchronizes page for reuse by the adapter
6106 **/ 6103 **/
6107static void igb_reuse_rx_page(struct igb_ring *rx_ring, 6104static void igb_reuse_rx_page(struct igb_ring *rx_ring,
6108 struct igb_rx_buffer *old_buff) 6105 struct igb_rx_buffer *old_buff)
@@ -6162,19 +6159,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6162} 6159}
6163 6160
6164/** 6161/**
6165 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff 6162 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
6166 * @rx_ring: rx descriptor ring to transact packets on 6163 * @rx_ring: rx descriptor ring to transact packets on
6167 * @rx_buffer: buffer containing page to add 6164 * @rx_buffer: buffer containing page to add
6168 * @rx_desc: descriptor containing length of buffer written by hardware 6165 * @rx_desc: descriptor containing length of buffer written by hardware
6169 * @skb: sk_buff to place the data into 6166 * @skb: sk_buff to place the data into
6170 * 6167 *
6171 * This function will add the data contained in rx_buffer->page to the skb. 6168 * This function will add the data contained in rx_buffer->page to the skb.
6172 * This is done either through a direct copy if the data in the buffer is 6169 * This is done either through a direct copy if the data in the buffer is
6173 * less than the skb header size, otherwise it will just attach the page as 6170 * less than the skb header size, otherwise it will just attach the page as
6174 * a frag to the skb. 6171 * a frag to the skb.
6175 * 6172 *
6176 * The function will then update the page offset if necessary and return 6173 * The function will then update the page offset if necessary and return
6177 * true if the buffer can be reused by the adapter. 6174 * true if the buffer can be reused by the adapter.
6178 **/ 6175 **/
6179static bool igb_add_rx_frag(struct igb_ring *rx_ring, 6176static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6180 struct igb_rx_buffer *rx_buffer, 6177 struct igb_rx_buffer *rx_buffer,
@@ -6317,8 +6314,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
6317 return NULL; 6314 return NULL;
6318 } 6315 }
6319 6316
6320 /* 6317 /* we will be copying header into skb->data in
6321 * we will be copying header into skb->data in
6322 * pskb_may_pull so it is in our interest to prefetch 6318 * pskb_may_pull so it is in our interest to prefetch
6323 * it now to avoid a possible cache miss 6319 * it now to avoid a possible cache miss
6324 */ 6320 */
@@ -6366,8 +6362,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
6366 if (igb_test_staterr(rx_desc, 6362 if (igb_test_staterr(rx_desc,
6367 E1000_RXDEXT_STATERR_TCPE | 6363 E1000_RXDEXT_STATERR_TCPE |
6368 E1000_RXDEXT_STATERR_IPE)) { 6364 E1000_RXDEXT_STATERR_IPE)) {
6369 /* 6365 /* work around errata with sctp packets where the TCPE aka
6370 * work around errata with sctp packets where the TCPE aka
6371 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 6366 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
6372 * packets, (aka let the stack check the crc32c) 6367 * packets, (aka let the stack check the crc32c)
6373 */ 6368 */
@@ -6398,15 +6393,15 @@ static inline void igb_rx_hash(struct igb_ring *ring,
6398} 6393}
6399 6394
6400/** 6395/**
6401 * igb_is_non_eop - process handling of non-EOP buffers 6396 * igb_is_non_eop - process handling of non-EOP buffers
6402 * @rx_ring: Rx ring being processed 6397 * @rx_ring: Rx ring being processed
6403 * @rx_desc: Rx descriptor for current buffer 6398 * @rx_desc: Rx descriptor for current buffer
6404 * @skb: current socket buffer containing buffer in progress 6399 * @skb: current socket buffer containing buffer in progress
6405 * 6400 *
6406 * This function updates next to clean. If the buffer is an EOP buffer 6401 * This function updates next to clean. If the buffer is an EOP buffer
6407 * this function exits returning false, otherwise it will place the 6402 * this function exits returning false, otherwise it will place the
6408 * sk_buff in the next buffer to be chained and return true indicating 6403 * sk_buff in the next buffer to be chained and return true indicating
6409 * that this is in fact a non-EOP buffer. 6404 * that this is in fact a non-EOP buffer.
6410 **/ 6405 **/
6411static bool igb_is_non_eop(struct igb_ring *rx_ring, 6406static bool igb_is_non_eop(struct igb_ring *rx_ring,
6412 union e1000_adv_rx_desc *rx_desc) 6407 union e1000_adv_rx_desc *rx_desc)
@@ -6426,15 +6421,15 @@ static bool igb_is_non_eop(struct igb_ring *rx_ring,
6426} 6421}
6427 6422
6428/** 6423/**
6429 * igb_get_headlen - determine size of header for LRO/GRO 6424 * igb_get_headlen - determine size of header for LRO/GRO
6430 * @data: pointer to the start of the headers 6425 * @data: pointer to the start of the headers
6431 * @max_len: total length of section to find headers in 6426 * @max_len: total length of section to find headers in
6432 * 6427 *
6433 * This function is meant to determine the length of headers that will 6428 * This function is meant to determine the length of headers that will
6434 * be recognized by hardware for LRO, and GRO offloads. The main 6429 * be recognized by hardware for LRO, and GRO offloads. The main
6435 * motivation of doing this is to only perform one pull for IPv4 TCP 6430 * motivation of doing this is to only perform one pull for IPv4 TCP
6436 * packets so that we can do basic things like calculating the gso_size 6431 * packets so that we can do basic things like calculating the gso_size
6437 * based on the average data per packet. 6432 * based on the average data per packet.
6438 **/ 6433 **/
6439static unsigned int igb_get_headlen(unsigned char *data, 6434static unsigned int igb_get_headlen(unsigned char *data,
6440 unsigned int max_len) 6435 unsigned int max_len)
@@ -6521,8 +6516,7 @@ static unsigned int igb_get_headlen(unsigned char *data,
6521 hdr.network += sizeof(struct udphdr); 6516 hdr.network += sizeof(struct udphdr);
6522 } 6517 }
6523 6518
6524 /* 6519 /* If everything has gone correctly hdr.network should be the
6525 * If everything has gone correctly hdr.network should be the
6526 * data section of the packet and will be the end of the header. 6520 * data section of the packet and will be the end of the header.
6527 * If not then it probably represents the end of the last recognized 6521 * If not then it probably represents the end of the last recognized
6528 * header. 6522 * header.
@@ -6534,17 +6528,17 @@ static unsigned int igb_get_headlen(unsigned char *data,
6534} 6528}
6535 6529
6536/** 6530/**
6537 * igb_pull_tail - igb specific version of skb_pull_tail 6531 * igb_pull_tail - igb specific version of skb_pull_tail
6538 * @rx_ring: rx descriptor ring packet is being transacted on 6532 * @rx_ring: rx descriptor ring packet is being transacted on
6539 * @rx_desc: pointer to the EOP Rx descriptor 6533 * @rx_desc: pointer to the EOP Rx descriptor
6540 * @skb: pointer to current skb being adjusted 6534 * @skb: pointer to current skb being adjusted
6541 * 6535 *
6542 * This function is an igb specific version of __pskb_pull_tail. The 6536 * This function is an igb specific version of __pskb_pull_tail. The
6543 * main difference between this version and the original function is that 6537 * main difference between this version and the original function is that
6544 * this function can make several assumptions about the state of things 6538 * this function can make several assumptions about the state of things
6545 * that allow for significant optimizations versus the standard function. 6539 * that allow for significant optimizations versus the standard function.
6546 * As a result we can do things like drop a frag and maintain an accurate 6540 * As a result we can do things like drop a frag and maintain an accurate
6547 * truesize for the skb. 6541 * truesize for the skb.
6548 */ 6542 */
6549static void igb_pull_tail(struct igb_ring *rx_ring, 6543static void igb_pull_tail(struct igb_ring *rx_ring,
6550 union e1000_adv_rx_desc *rx_desc, 6544 union e1000_adv_rx_desc *rx_desc,
@@ -6554,8 +6548,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6554 unsigned char *va; 6548 unsigned char *va;
6555 unsigned int pull_len; 6549 unsigned int pull_len;
6556 6550
6557 /* 6551 /* it is valid to use page_address instead of kmap since we are
6558 * it is valid to use page_address instead of kmap since we are
6559 * working with pages allocated out of the lomem pool per 6552 * working with pages allocated out of the lomem pool per
6560 * alloc_page(GFP_ATOMIC) 6553 * alloc_page(GFP_ATOMIC)
6561 */ 6554 */
@@ -6575,8 +6568,7 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6575 va += IGB_TS_HDR_LEN; 6568 va += IGB_TS_HDR_LEN;
6576 } 6569 }
6577 6570
6578 /* 6571 /* we need the header to contain the greater of either ETH_HLEN or
6579 * we need the header to contain the greater of either ETH_HLEN or
6580 * 60 bytes if the skb->len is less than 60 for skb_pad. 6572 * 60 bytes if the skb->len is less than 60 for skb_pad.
6581 */ 6573 */
6582 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN); 6574 pull_len = igb_get_headlen(va, IGB_RX_HDR_LEN);
@@ -6592,24 +6584,23 @@ static void igb_pull_tail(struct igb_ring *rx_ring,
6592} 6584}
6593 6585
6594/** 6586/**
6595 * igb_cleanup_headers - Correct corrupted or empty headers 6587 * igb_cleanup_headers - Correct corrupted or empty headers
6596 * @rx_ring: rx descriptor ring packet is being transacted on 6588 * @rx_ring: rx descriptor ring packet is being transacted on
6597 * @rx_desc: pointer to the EOP Rx descriptor 6589 * @rx_desc: pointer to the EOP Rx descriptor
6598 * @skb: pointer to current skb being fixed 6590 * @skb: pointer to current skb being fixed
6599 * 6591 *
6600 * Address the case where we are pulling data in on pages only 6592 * Address the case where we are pulling data in on pages only
6601 * and as such no data is present in the skb header. 6593 * and as such no data is present in the skb header.
6602 * 6594 *
6603 * In addition if skb is not at least 60 bytes we need to pad it so that 6595 * In addition if skb is not at least 60 bytes we need to pad it so that
6604 * it is large enough to qualify as a valid Ethernet frame. 6596 * it is large enough to qualify as a valid Ethernet frame.
6605 * 6597 *
6606 * Returns true if an error was encountered and skb was freed. 6598 * Returns true if an error was encountered and skb was freed.
6607 **/ 6599 **/
6608static bool igb_cleanup_headers(struct igb_ring *rx_ring, 6600static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6609 union e1000_adv_rx_desc *rx_desc, 6601 union e1000_adv_rx_desc *rx_desc,
6610 struct sk_buff *skb) 6602 struct sk_buff *skb)
6611{ 6603{
6612
6613 if (unlikely((igb_test_staterr(rx_desc, 6604 if (unlikely((igb_test_staterr(rx_desc,
6614 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) { 6605 E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
6615 struct net_device *netdev = rx_ring->netdev; 6606 struct net_device *netdev = rx_ring->netdev;
@@ -6636,14 +6627,14 @@ static bool igb_cleanup_headers(struct igb_ring *rx_ring,
6636} 6627}
6637 6628
6638/** 6629/**
6639 * igb_process_skb_fields - Populate skb header fields from Rx descriptor 6630 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
6640 * @rx_ring: rx descriptor ring packet is being transacted on 6631 * @rx_ring: rx descriptor ring packet is being transacted on
6641 * @rx_desc: pointer to the EOP Rx descriptor 6632 * @rx_desc: pointer to the EOP Rx descriptor
6642 * @skb: pointer to current skb being populated 6633 * @skb: pointer to current skb being populated
6643 * 6634 *
6644 * This function checks the ring, descriptor, and packet information in 6635 * This function checks the ring, descriptor, and packet information in
6645 * order to populate the hash, checksum, VLAN, timestamp, protocol, and 6636 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
6646 * other fields within the skb. 6637 * other fields within the skb.
6647 **/ 6638 **/
6648static void igb_process_skb_fields(struct igb_ring *rx_ring, 6639static void igb_process_skb_fields(struct igb_ring *rx_ring,
6649 union e1000_adv_rx_desc *rx_desc, 6640 union e1000_adv_rx_desc *rx_desc,
@@ -6774,8 +6765,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
6774 /* map page for use */ 6765 /* map page for use */
6775 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); 6766 dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
6776 6767
6777 /* 6768 /* if mapping failed free memory back to system since
6778 * if mapping failed free memory back to system since
6779 * there isn't much point in holding memory we can't use 6769 * there isn't much point in holding memory we can't use
6780 */ 6770 */
6781 if (dma_mapping_error(rx_ring->dev, dma)) { 6771 if (dma_mapping_error(rx_ring->dev, dma)) {
@@ -6801,8 +6791,8 @@ static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
6801} 6791}
6802 6792
6803/** 6793/**
6804 * igb_alloc_rx_buffers - Replace used receive buffers; packet split 6794 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
6805 * @adapter: address of board private structure 6795 * @adapter: address of board private structure
6806 **/ 6796 **/
6807void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count) 6797void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6808{ 6798{
@@ -6822,8 +6812,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6822 if (!igb_alloc_mapped_page(rx_ring, bi)) 6812 if (!igb_alloc_mapped_page(rx_ring, bi))
6823 break; 6813 break;
6824 6814
6825 /* 6815 /* Refresh the desc even if buffer_addrs didn't change
6826 * Refresh the desc even if buffer_addrs didn't change
6827 * because each write-back erases this info. 6816 * because each write-back erases this info.
6828 */ 6817 */
6829 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + 6818 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
@@ -6854,8 +6843,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
6854 /* update next to alloc since we have filled the ring */ 6843 /* update next to alloc since we have filled the ring */
6855 rx_ring->next_to_alloc = i; 6844 rx_ring->next_to_alloc = i;
6856 6845
6857 /* 6846 /* Force memory writes to complete before letting h/w
6858 * Force memory writes to complete before letting h/w
6859 * know there are new descriptors to fetch. (Only 6847 * know there are new descriptors to fetch. (Only
6860 * applicable for weak-ordered memory model archs, 6848 * applicable for weak-ordered memory model archs,
6861 * such as IA-64). 6849 * such as IA-64).
@@ -7016,7 +7004,8 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
7016 mac->autoneg = 0; 7004 mac->autoneg = 0;
7017 7005
7018 /* Make sure dplx is at most 1 bit and lsb of speed is not set 7006 /* Make sure dplx is at most 1 bit and lsb of speed is not set
7019 * for the switch() below to work */ 7007 * for the switch() below to work
7008 */
7020 if ((spd & 1) || (dplx & ~1)) 7009 if ((spd & 1) || (dplx & ~1))
7021 goto err_inval; 7010 goto err_inval;
7022 7011
@@ -7131,7 +7120,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
7131 igb_power_up_link(adapter); 7120 igb_power_up_link(adapter);
7132 7121
7133 /* Release control of h/w to f/w. If f/w is AMT enabled, this 7122 /* Release control of h/w to f/w. If f/w is AMT enabled, this
7134 * would have already happened in close and is redundant. */ 7123 * would have already happened in close and is redundant.
7124 */
7135 igb_release_hw_control(adapter); 7125 igb_release_hw_control(adapter);
7136 7126
7137 pci_disable_device(pdev); 7127 pci_disable_device(pdev);
@@ -7193,7 +7183,8 @@ static int igb_resume(struct device *dev)
7193 igb_reset(adapter); 7183 igb_reset(adapter);
7194 7184
7195 /* let the f/w know that the h/w is now under the control of the 7185 /* let the f/w know that the h/w is now under the control of the
7196 * driver. */ 7186 * driver.
7187 */
7197 igb_get_hw_control(adapter); 7188 igb_get_hw_control(adapter);
7198 7189
7199 wr32(E1000_WUS, ~0); 7190 wr32(E1000_WUS, ~0);
@@ -7329,8 +7320,7 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
7329} 7320}
7330 7321
7331#ifdef CONFIG_NET_POLL_CONTROLLER 7322#ifdef CONFIG_NET_POLL_CONTROLLER
7332/* 7323/* Polling 'interrupt' - used by things like netconsole to send skbs
7333 * Polling 'interrupt' - used by things like netconsole to send skbs
7334 * without having to re-enable interrupts. It's not called while 7324 * without having to re-enable interrupts. It's not called while
7335 * the interrupt routine is executing. 7325 * the interrupt routine is executing.
7336 */ 7326 */
@@ -7353,13 +7343,13 @@ static void igb_netpoll(struct net_device *netdev)
7353#endif /* CONFIG_NET_POLL_CONTROLLER */ 7343#endif /* CONFIG_NET_POLL_CONTROLLER */
7354 7344
7355/** 7345/**
7356 * igb_io_error_detected - called when PCI error is detected 7346 * igb_io_error_detected - called when PCI error is detected
7357 * @pdev: Pointer to PCI device 7347 * @pdev: Pointer to PCI device
7358 * @state: The current pci connection state 7348 * @state: The current pci connection state
7359 * 7349 *
7360 * This function is called after a PCI bus error affecting 7350 * This function is called after a PCI bus error affecting
7361 * this device has been detected. 7351 * this device has been detected.
7362 */ 7352 **/
7363static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev, 7353static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7364 pci_channel_state_t state) 7354 pci_channel_state_t state)
7365{ 7355{
@@ -7380,12 +7370,12 @@ static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
7380} 7370}
7381 7371
7382/** 7372/**
7383 * igb_io_slot_reset - called after the pci bus has been reset. 7373 * igb_io_slot_reset - called after the pci bus has been reset.
7384 * @pdev: Pointer to PCI device 7374 * @pdev: Pointer to PCI device
7385 * 7375 *
7386 * Restart the card from scratch, as if from a cold-boot. Implementation 7376 * Restart the card from scratch, as if from a cold-boot. Implementation
7387 * resembles the first-half of the igb_resume routine. 7377 * resembles the first-half of the igb_resume routine.
7388 */ 7378 **/
7389static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev) 7379static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7390{ 7380{
7391 struct net_device *netdev = pci_get_drvdata(pdev); 7381 struct net_device *netdev = pci_get_drvdata(pdev);
@@ -7413,8 +7403,9 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7413 7403
7414 err = pci_cleanup_aer_uncorrect_error_status(pdev); 7404 err = pci_cleanup_aer_uncorrect_error_status(pdev);
7415 if (err) { 7405 if (err) {
7416 dev_err(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status " 7406 dev_err(&pdev->dev,
7417 "failed 0x%0x\n", err); 7407 "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
7408 err);
7418 /* non-fatal, continue */ 7409 /* non-fatal, continue */
7419 } 7410 }
7420 7411
@@ -7422,12 +7413,12 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
7422} 7413}
7423 7414
7424/** 7415/**
7425 * igb_io_resume - called when traffic can start flowing again. 7416 * igb_io_resume - called when traffic can start flowing again.
7426 * @pdev: Pointer to PCI device 7417 * @pdev: Pointer to PCI device
7427 * 7418 *
7428 * This callback is called when the error recovery driver tells us that 7419 * This callback is called when the error recovery driver tells us that
7429 * its OK to resume normal operation. Implementation resembles the 7420 * its OK to resume normal operation. Implementation resembles the
7430 * second-half of the igb_resume routine. 7421 * second-half of the igb_resume routine.
7431 */ 7422 */
7432static void igb_io_resume(struct pci_dev *pdev) 7423static void igb_io_resume(struct pci_dev *pdev)
7433{ 7424{
@@ -7444,12 +7435,13 @@ static void igb_io_resume(struct pci_dev *pdev)
7444 netif_device_attach(netdev); 7435 netif_device_attach(netdev);
7445 7436
7446 /* let the f/w know that the h/w is now under the control of the 7437 /* let the f/w know that the h/w is now under the control of the
7447 * driver. */ 7438 * driver.
7439 */
7448 igb_get_hw_control(adapter); 7440 igb_get_hw_control(adapter);
7449} 7441}
7450 7442
7451static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index, 7443static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7452 u8 qsel) 7444 u8 qsel)
7453{ 7445{
7454 u32 rar_low, rar_high; 7446 u32 rar_low, rar_high;
7455 struct e1000_hw *hw = &adapter->hw; 7447 struct e1000_hw *hw = &adapter->hw;
@@ -7458,7 +7450,7 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7458 * from network order (big endian) to little endian 7450 * from network order (big endian) to little endian
7459 */ 7451 */
7460 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) | 7452 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
7461 ((u32) addr[2] << 16) | ((u32) addr[3] << 24)); 7453 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
7462 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 7454 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
7463 7455
7464 /* Indicate to hardware the Address is Valid. */ 7456 /* Indicate to hardware the Address is Valid. */
@@ -7476,11 +7468,12 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
7476} 7468}
7477 7469
7478static int igb_set_vf_mac(struct igb_adapter *adapter, 7470static int igb_set_vf_mac(struct igb_adapter *adapter,
7479 int vf, unsigned char *mac_addr) 7471 int vf, unsigned char *mac_addr)
7480{ 7472{
7481 struct e1000_hw *hw = &adapter->hw; 7473 struct e1000_hw *hw = &adapter->hw;
7482 /* VF MAC addresses start at end of receive addresses and moves 7474 /* VF MAC addresses start at end of receive addresses and moves
7483 * torwards the first, as a result a collision should not be possible */ 7475 * towards the first, as a result a collision should not be possible
7476 */
7484 int rar_entry = hw->mac.rar_entry_count - (vf + 1); 7477 int rar_entry = hw->mac.rar_entry_count - (vf + 1);
7485 7478
7486 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 7479 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
@@ -7497,13 +7490,13 @@ static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
7497 return -EINVAL; 7490 return -EINVAL;
7498 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC; 7491 adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
7499 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); 7492 dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
7500 dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" 7493 dev_info(&adapter->pdev->dev,
7501 " change effective."); 7494 "Reload the VF driver to make this change effective.");
7502 if (test_bit(__IGB_DOWN, &adapter->state)) { 7495 if (test_bit(__IGB_DOWN, &adapter->state)) {
7503 dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," 7496 dev_warn(&adapter->pdev->dev,
7504 " but the PF device is not up.\n"); 7497 "The VF MAC address has been set, but the PF device is not up.\n");
7505 dev_warn(&adapter->pdev->dev, "Bring the PF device up before" 7498 dev_warn(&adapter->pdev->dev,
7506 " attempting to use the VF device.\n"); 7499 "Bring the PF device up before attempting to use the VF device.\n");
7507 } 7500 }
7508 return igb_set_vf_mac(adapter, vf, mac); 7501 return igb_set_vf_mac(adapter, vf, mac);
7509} 7502}
@@ -7530,19 +7523,19 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
7530 /* Calculate the rate factor values to set */ 7523 /* Calculate the rate factor values to set */
7531 rf_int = link_speed / tx_rate; 7524 rf_int = link_speed / tx_rate;
7532 rf_dec = (link_speed - (rf_int * tx_rate)); 7525 rf_dec = (link_speed - (rf_int * tx_rate));
7533 rf_dec = (rf_dec * (1<<E1000_RTTBCNRC_RF_INT_SHIFT)) / tx_rate; 7526 rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
7527 tx_rate;
7534 7528
7535 bcnrc_val = E1000_RTTBCNRC_RS_ENA; 7529 bcnrc_val = E1000_RTTBCNRC_RS_ENA;
7536 bcnrc_val |= ((rf_int<<E1000_RTTBCNRC_RF_INT_SHIFT) & 7530 bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
7537 E1000_RTTBCNRC_RF_INT_MASK); 7531 E1000_RTTBCNRC_RF_INT_MASK);
7538 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK); 7532 bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
7539 } else { 7533 } else {
7540 bcnrc_val = 0; 7534 bcnrc_val = 0;
7541 } 7535 }
7542 7536
7543 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ 7537 wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
7544 /* 7538 /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7545 * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
7546 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. 7539 * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported.
7547 */ 7540 */
7548 wr32(E1000_RTTBCNRM, 0x14); 7541 wr32(E1000_RTTBCNRM, 0x14);
@@ -7564,8 +7557,7 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7564 reset_rate = true; 7557 reset_rate = true;
7565 adapter->vf_rate_link_speed = 0; 7558 adapter->vf_rate_link_speed = 0;
7566 dev_info(&adapter->pdev->dev, 7559 dev_info(&adapter->pdev->dev,
7567 "Link speed has been changed. VF Transmit " 7560 "Link speed has been changed. VF Transmit rate is disabled\n");
7568 "rate is disabled\n");
7569 } 7561 }
7570 7562
7571 for (i = 0; i < adapter->vfs_allocated_count; i++) { 7563 for (i = 0; i < adapter->vfs_allocated_count; i++) {
@@ -7573,8 +7565,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
7573 adapter->vf_data[i].tx_rate = 0; 7565 adapter->vf_data[i].tx_rate = 0;
7574 7566
7575 igb_set_vf_rate_limit(&adapter->hw, i, 7567 igb_set_vf_rate_limit(&adapter->hw, i,
7576 adapter->vf_data[i].tx_rate, 7568 adapter->vf_data[i].tx_rate,
7577 actual_link_speed); 7569 actual_link_speed);
7578 } 7570 }
7579} 7571}
7580 7572
@@ -7645,7 +7637,7 @@ static void igb_vmm_control(struct igb_adapter *adapter)
7645 igb_vmdq_set_loopback_pf(hw, true); 7637 igb_vmdq_set_loopback_pf(hw, true);
7646 igb_vmdq_set_replication_pf(hw, true); 7638 igb_vmdq_set_replication_pf(hw, true);
7647 igb_vmdq_set_anti_spoofing_pf(hw, true, 7639 igb_vmdq_set_anti_spoofing_pf(hw, true,
7648 adapter->vfs_allocated_count); 7640 adapter->vfs_allocated_count);
7649 } else { 7641 } else {
7650 igb_vmdq_set_loopback_pf(hw, false); 7642 igb_vmdq_set_loopback_pf(hw, false);
7651 igb_vmdq_set_replication_pf(hw, false); 7643 igb_vmdq_set_replication_pf(hw, false);
@@ -7665,8 +7657,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7665 /* force threshold to 0. */ 7657 /* force threshold to 0. */
7666 wr32(E1000_DMCTXTH, 0); 7658 wr32(E1000_DMCTXTH, 0);
7667 7659
7668 /* 7660 /* DMA Coalescing high water mark needs to be greater
7669 * DMA Coalescing high water mark needs to be greater
7670 * than the Rx threshold. Set hwm to PBA - max frame 7661 * than the Rx threshold. Set hwm to PBA - max frame
7671 * size in 16B units, capping it at PBA - 6KB. 7662 * size in 16B units, capping it at PBA - 6KB.
7672 */ 7663 */
@@ -7679,8 +7670,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7679 & E1000_FCRTC_RTH_COAL_MASK); 7670 & E1000_FCRTC_RTH_COAL_MASK);
7680 wr32(E1000_FCRTC, reg); 7671 wr32(E1000_FCRTC, reg);
7681 7672
7682 /* 7673 /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
7683 * Set the DMA Coalescing Rx threshold to PBA - 2 * max
7684 * frame size, capping it at PBA - 10KB. 7674 * frame size, capping it at PBA - 10KB.
7685 */ 7675 */
7686 dmac_thr = pba - adapter->max_frame_size / 512; 7676 dmac_thr = pba - adapter->max_frame_size / 512;
@@ -7701,8 +7691,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7701 reg &= ~E1000_DMACR_DC_BMC2OSW_EN; 7691 reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
7702 wr32(E1000_DMACR, reg); 7692 wr32(E1000_DMACR, reg);
7703 7693
7704 /* 7694 /* no lower threshold to disable
7705 * no lower threshold to disable
7706 * coalescing(smart fifb)-UTRESH=0 7695 * coalescing(smart fifb)-UTRESH=0
7707 */ 7696 */
7708 wr32(E1000_DMCRTRH, 0); 7697 wr32(E1000_DMCRTRH, 0);
@@ -7711,15 +7700,13 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7711 7700
7712 wr32(E1000_DMCTLX, reg); 7701 wr32(E1000_DMCTLX, reg);
7713 7702
7714 /* 7703 /* free space in tx packet buffer to wake from
7715 * free space in tx packet buffer to wake from
7716 * DMA coal 7704 * DMA coal
7717 */ 7705 */
7718 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - 7706 wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
7719 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); 7707 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
7720 7708
7721 /* 7709 /* make low power state decision controlled
7722 * make low power state decision controlled
7723 * by DMA coal 7710 * by DMA coal
7724 */ 7711 */
7725 reg = rd32(E1000_PCIEMISC); 7712 reg = rd32(E1000_PCIEMISC);
@@ -7733,7 +7720,8 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7733 } 7720 }
7734} 7721}
7735 7722
7736/* igb_read_i2c_byte - Reads 8 bit word over I2C 7723/**
7724 * igb_read_i2c_byte - Reads 8 bit word over I2C
7737 * @hw: pointer to hardware structure 7725 * @hw: pointer to hardware structure
7738 * @byte_offset: byte offset to read 7726 * @byte_offset: byte offset to read
7739 * @dev_addr: device address 7727 * @dev_addr: device address
@@ -7741,9 +7729,9 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
7741 * 7729 *
7742 * Performs byte read operation over I2C interface at 7730 * Performs byte read operation over I2C interface at
7743 * a specified device address. 7731 * a specified device address.
7744 */ 7732 **/
7745s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7733s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7746 u8 dev_addr, u8 *data) 7734 u8 dev_addr, u8 *data)
7747{ 7735{
7748 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7736 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7749 struct i2c_client *this_client = adapter->i2c_client; 7737 struct i2c_client *this_client = adapter->i2c_client;
@@ -7770,7 +7758,8 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7770 } 7758 }
7771} 7759}
7772 7760
7773/* igb_write_i2c_byte - Writes 8 bit word over I2C 7761/**
7762 * igb_write_i2c_byte - Writes 8 bit word over I2C
7774 * @hw: pointer to hardware structure 7763 * @hw: pointer to hardware structure
7775 * @byte_offset: byte offset to write 7764 * @byte_offset: byte offset to write
7776 * @dev_addr: device address 7765 * @dev_addr: device address
@@ -7778,9 +7767,9 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7778 * 7767 *
7779 * Performs byte write operation over I2C interface at 7768 * Performs byte write operation over I2C interface at
7780 * a specified device address. 7769 * a specified device address.
7781 */ 7770 **/
7782s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, 7771s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
7783 u8 dev_addr, u8 data) 7772 u8 dev_addr, u8 data)
7784{ 7773{
7785 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw); 7774 struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
7786 struct i2c_client *this_client = adapter->i2c_client; 7775 struct i2c_client *this_client = adapter->i2c_client;