Diffstat (limited to 'drivers/net/igb')
 drivers/net/igb/e1000_82575.c   |  18
 drivers/net/igb/e1000_defines.h |  31
 drivers/net/igb/e1000_hw.h      |   2
 drivers/net/igb/e1000_phy.c     | 206
 drivers/net/igb/e1000_phy.h     |   2
 drivers/net/igb/igb.h           |  11
 drivers/net/igb/igb_ethtool.c   |  52
 drivers/net/igb/igb_main.c      | 164
 8 files changed, 408 insertions(+), 78 deletions(-)
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 187622f1c816..bc183f5487cb 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -132,6 +132,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 	case E1000_DEV_ID_82580_SERDES:
 	case E1000_DEV_ID_82580_SGMII:
 	case E1000_DEV_ID_82580_COPPER_DUAL:
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+	case E1000_DEV_ID_DH89XXCC_SERDES:
 		mac->type = e1000_82580;
 		break;
 	case E1000_DEV_ID_I350_COPPER:
@@ -282,10 +284,18 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
 
 	/* Verify phy id and set remaining function pointers */
 	switch (phy->id) {
+	case I347AT4_E_PHY_ID:
+	case M88E1112_E_PHY_ID:
 	case M88E1111_I_PHY_ID:
 		phy->type = e1000_phy_m88;
 		phy->ops.get_phy_info = igb_get_phy_info_m88;
-		phy->ops.get_cable_length = igb_get_cable_length_m88;
+
+		if (phy->id == I347AT4_E_PHY_ID ||
+		    phy->id == M88E1112_E_PHY_ID)
+			phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+		else
+			phy->ops.get_cable_length = igb_get_cable_length_m88;
+
 		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
 		break;
 	case IGP03E1000_E_PHY_ID:
@@ -1058,7 +1068,11 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
 	}
 	switch (hw->phy.type) {
 	case e1000_phy_m88:
-		ret_val = igb_copper_link_setup_m88(hw);
+		if (hw->phy.id == I347AT4_E_PHY_ID ||
+		    hw->phy.id == M88E1112_E_PHY_ID)
+			ret_val = igb_copper_link_setup_m88_gen2(hw);
+		else
+			ret_val = igb_copper_link_setup_m88(hw);
 		break;
 	case e1000_phy_igp_3:
 		ret_val = igb_copper_link_setup_igp(hw);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index bbd2ec308eb0..62222796a8b3 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -634,6 +634,8 @@
  * E = External
  */
 #define M88E1111_I_PHY_ID   0x01410CC0
+#define M88E1112_E_PHY_ID   0x01410C90
+#define I347AT4_E_PHY_ID    0x01410DC0
 #define IGP03E1000_E_PHY_ID 0x02A80390
 #define I82580_I_PHY_ID     0x015403A0
 #define I350_I_PHY_ID       0x015403B0
@@ -702,6 +704,35 @@
 #define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
 #define M88E1000_EPSCR_TX_CLK_25          0x0070 /* 25 MHz TX_CLK */
 
+/* Intel i347-AT4 Registers */
+
+#define I347AT4_PCDL        0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC        0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT 0x16
+
+/* i347-AT4 Extended PHY Specific Control Register */
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
+
+/* i347-AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* Marvell 1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE 0x001A
+
 /* M88EC018 Rev 2 specific DownShift settings */
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
 #define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X   0x0800
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index cb8db78b1a05..c0b017f8d782 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,6 +54,8 @@ struct e1000_hw;
 #define E1000_DEV_ID_82580_SERDES      0x1510
 #define E1000_DEV_ID_82580_SGMII       0x1511
 #define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
+#define E1000_DEV_ID_DH89XXCC_SGMII    0x0436
+#define E1000_DEV_ID_DH89XXCC_SERDES   0x0438
 #define E1000_DEV_ID_I350_COPPER       0x1521
 #define E1000_DEV_ID_I350_FIBER        0x1522
 #define E1000_DEV_ID_I350_SERDES       0x1523
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index cf1f32300923..ddd036a78999 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -570,6 +570,89 @@ out:
 }
 
 /**
+ * igb_copper_link_setup_m88_gen2 - Setup m88 PHY's for copper link
+ * @hw: pointer to the HW structure
+ *
+ * Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHY's.
+ * Also enables and sets the downshift parameters.
+ **/
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		/* M88E1112 does not support this mode */
+		if (phy->id != M88E1112_E_PHY_ID) {
+			phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+			break;
+		}
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift and set it to X6 */
+	phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Commit the changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
  * igb_copper_link_setup_igp - Setup igp PHY's for copper link
  * @hw: pointer to the HW structure
  *
@@ -1124,18 +1207,25 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 			goto out;
 
 		if (!link) {
-			/*
-			 * We didn't get link.
-			 * Reset the DSP and cross our fingers.
-			 */
-			ret_val = phy->ops.write_reg(hw,
-					M88E1000_PHY_PAGE_SELECT,
-					0x001d);
-			if (ret_val)
-				goto out;
-			ret_val = igb_phy_reset_dsp(hw);
-			if (ret_val)
-				goto out;
+			if (hw->phy.type != e1000_phy_m88 ||
+			    hw->phy.id == I347AT4_E_PHY_ID ||
+			    hw->phy.id == M88E1112_E_PHY_ID) {
+				hw_dbg("Link taking longer than expected.\n");
+			} else {
+
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = phy->ops.write_reg(hw,
+						M88E1000_PHY_PAGE_SELECT,
+						0x001d);
+				if (ret_val)
+					goto out;
+				ret_val = igb_phy_reset_dsp(hw);
+				if (ret_val)
+					goto out;
+			}
 		}
 
 		/* Try once more */
@@ -1145,6 +1235,11 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
 		goto out;
 	}
 
+	if (hw->phy.type != e1000_phy_m88 ||
+	    hw->phy.id == I347AT4_E_PHY_ID ||
+	    hw->phy.id == M88E1112_E_PHY_ID)
+		goto out;
+
 	ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
 	if (ret_val)
 		goto out;
@@ -1557,6 +1652,93 @@ out:
 	return ret_val;
 }
 
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, phy_data2, index, default_page, is_cm;
+
+	switch (hw->phy.id) {
+	case I347AT4_E_PHY_ID:
+		/* Remember the original page select and set it to 7 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+		if (ret_val)
+			goto out;
+
+		/* Get cable length from PHY Cable Diagnostics Length Reg */
+		ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			goto out;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+		break;
+	case M88E1112_E_PHY_ID:
+		/* Remember the original page select and set it to 5 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->min_cable_length = e1000_m88_cable_length_table[index];
+		phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+		phy->cable_length = (phy->min_cable_length +
+				     phy->max_cable_length) / 2;
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
 /**
  * igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
  * @hw: pointer to the HW structure
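A quick sketch of the unit handling the new igb_get_cable_length_m88_gen2() relies on for the i347-AT4 path: PCDL returns a raw length whose unit (centimetres or metres) is selected by the CABLE_LENGTH_UNIT bit in PCDC, so a centimetre reading is scaled down before being stored in the phy structure. The helper below is illustrative only (standalone types and a hypothetical name, not driver code):

	/* Illustrative only: convert a raw i347-AT4 cable diagnostics
	 * reading to metres. 'pcdl' is the raw length value, 'pcdc' the
	 * diagnostics control word; bit 0x0400 clear means centimetres.
	 */
	static unsigned int i347at4_length_to_meters(unsigned int pcdl,
						     unsigned int pcdc)
	{
		if (!(pcdc & 0x0400))	/* unit bit clear: value is in cm */
			return pcdl / 100;
		return pcdl;		/* already in metres */
	}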
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 565a6dbb3714..2cc117705a31 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -45,9 +45,11 @@ s32 igb_check_downshift(struct e1000_hw *hw);
 s32 igb_check_reset_block(struct e1000_hw *hw);
 s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
 s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
 s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
 s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
 s32 igb_get_cable_length_m88(struct e1000_hw *hw);
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
 s32 igb_get_cable_length_igp_2(struct e1000_hw *hw);
 s32 igb_get_phy_id(struct e1000_hw *hw);
 s32 igb_get_phy_info_igp(struct e1000_hw *hw);
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 6e63d9a7fc75..edab9c442399 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -143,7 +143,7 @@ struct igb_buffer {
 		u16 next_to_watch;
 		unsigned int bytecount;
 		u16 gso_segs;
-		union skb_shared_tx shtx;
+		u8 tx_flags;
 		u8 mapped_as_page;
 	};
 	/* RX */
@@ -159,6 +159,7 @@ struct igb_tx_queue_stats {
 	u64 packets;
 	u64 bytes;
 	u64 restart_queue;
+	u64 restart_queue2;
 };
 
 struct igb_rx_queue_stats {
@@ -210,11 +211,14 @@ struct igb_ring {
 		/* TX */
 		struct {
 			struct igb_tx_queue_stats tx_stats;
+			struct u64_stats_sync tx_syncp;
+			struct u64_stats_sync tx_syncp2;
 			bool detect_tx_hung;
 		};
 		/* RX */
 		struct {
 			struct igb_rx_queue_stats rx_stats;
+			struct u64_stats_sync rx_syncp;
 			u32 rx_buffer_len;
 		};
 	};
@@ -288,6 +292,9 @@ struct igb_adapter {
 	struct timecompare compare;
 	struct hwtstamp_config hwtstamp_config;
 
+	spinlock_t stats64_lock;
+	struct rtnl_link_stats64 stats64;
+
 	/* structs defined in e1000_hw.h */
 	struct e1000_hw hw;
 	struct e1000_hw_stats stats;
@@ -357,7 +364,7 @@ extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
 extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
 					   struct igb_buffer *);
 extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
-extern void igb_update_stats(struct igb_adapter *);
+extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *);
 extern bool igb_has_link(struct igb_adapter *adapter);
 extern void igb_set_ethtool_ops(struct net_device *);
 extern void igb_power_up_link(struct igb_adapter *);
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 26bf6a13d1c1..a70e16bcfa7e 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -90,8 +90,8 @@ static const struct igb_stats igb_gstrings_stats[] = {
 
 #define IGB_NETDEV_STAT(_net_stat) { \
 	.stat_string = __stringify(_net_stat), \
-	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
-	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
+	.sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \
+	.stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \
 }
 static const struct igb_stats igb_gstrings_net_stats[] = {
 	IGB_NETDEV_STAT(rx_errors),
@@ -111,8 +111,9 @@ static const struct igb_stats igb_gstrings_net_stats[] = {
 	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
 #define IGB_RX_QUEUE_STATS_LEN \
 	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
-#define IGB_TX_QUEUE_STATS_LEN \
-	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+
+#define IGB_TX_QUEUE_STATS_LEN 3 /* packets, bytes, restart_queue */
+
 #define IGB_QUEUE_STATS_LEN \
 	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
 	  IGB_RX_QUEUE_STATS_LEN) + \
@@ -2070,12 +2071,14 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 				  struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct net_device_stats *net_stats = &netdev->stats;
-	u64 *queue_stat;
-	int i, j, k;
+	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
+	unsigned int start;
+	struct igb_ring *ring;
+	int i, j;
 	char *p;
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, net_stats);
 
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
 		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
@@ -2088,15 +2091,36 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
-		for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
-			data[i] = queue_stat[k];
+		u64 restart2;
+
+		ring = adapter->tx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			data[i]   = ring->tx_stats.packets;
+			data[i+1] = ring->tx_stats.bytes;
+			data[i+2] = ring->tx_stats.restart_queue;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp2);
+			restart2 = ring->tx_stats.restart_queue2;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp2, start));
+		data[i+2] += restart2;
+
+		i += IGB_TX_QUEUE_STATS_LEN;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
-		for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
-			data[i] = queue_stat[k];
+		ring = adapter->rx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			data[i]   = ring->rx_stats.packets;
+			data[i+1] = ring->rx_stats.bytes;
+			data[i+2] = ring->rx_stats.drops;
+			data[i+3] = ring->rx_stats.csum_err;
+			data[i+4] = ring->rx_stats.alloc_failed;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		i += IGB_RX_QUEUE_STATS_LEN;
 	}
+	spin_unlock(&adapter->stats64_lock);
 }
 
 static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
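The do/while loops above are the reader half of the u64_stats_sync scheme this patch introduces for the per-ring counters: the Tx/Rx clean paths bracket their counter updates with u64_stats_update_begin()/u64_stats_update_end(), and readers such as igb_get_ethtool_stats() retry the snapshot whenever a writer ran concurrently, so 64-bit counters stay consistent on 32-bit machines without a lock in the hot path. A minimal sketch of the pairing, using a hypothetical demo_ring structure rather than the igb types:

	#include <linux/types.h>
	#include <linux/u64_stats_sync.h>

	struct demo_ring {
		u64 packets;
		u64 bytes;
		struct u64_stats_sync syncp;
	};

	/* Writer side (e.g. NAPI poll context): publish updated counters. */
	static void demo_ring_add(struct demo_ring *r, u64 pkts, u64 len)
	{
		u64_stats_update_begin(&r->syncp);
		r->packets += pkts;
		r->bytes += len;
		u64_stats_update_end(&r->syncp);
	}

	/* Reader side: retry until a consistent snapshot is observed. */
	static void demo_ring_read(struct demo_ring *r, u64 *pkts, u64 *len)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&r->syncp);
			*pkts = r->packets;
			*len = r->bytes;
		} while (u64_stats_fetch_retry_bh(&r->syncp, start));
	}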
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index aaf506c56ccb..14db09e2fa8b 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -71,6 +71,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -94,7 +96,6 @@ static int igb_setup_all_rx_resources(struct igb_adapter *);
 static void igb_free_all_tx_resources(struct igb_adapter *);
 static void igb_free_all_rx_resources(struct igb_adapter *);
 static void igb_setup_mrqc(struct igb_adapter *);
-void igb_update_stats(struct igb_adapter *);
 static int igb_probe(struct pci_dev *, const struct pci_device_id *);
 static void __devexit igb_remove(struct pci_dev *pdev);
 static int igb_sw_init(struct igb_adapter *);
@@ -111,7 +112,8 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
-static struct net_device_stats *igb_get_stats(struct net_device *);
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
+						 struct rtnl_link_stats64 *stats);
 static int igb_change_mtu(struct net_device *, int);
 static int igb_set_mac(struct net_device *, void *);
 static void igb_set_uta(struct igb_adapter *adapter);
@@ -986,7 +988,7 @@ static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
  * Attempt to configure interrupts using the best available
  * capabilities of the hardware and kernel.
  **/
-static void igb_set_interrupt_capability(struct igb_adapter *adapter)
+static int igb_set_interrupt_capability(struct igb_adapter *adapter)
 {
 	int err;
 	int numvecs, i;
@@ -1052,8 +1054,10 @@ msi_only:
 	if (!pci_enable_msi(adapter->pdev))
 		adapter->flags |= IGB_FLAG_HAS_MSI;
 out:
-	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+	/* Notify the stack of the (possibly) reduced queue counts. */
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+	return netif_set_real_num_rx_queues(adapter->netdev,
+					    adapter->num_rx_queues);
 }
 
 /**
@@ -1152,7 +1156,9 @@ static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
 	struct pci_dev *pdev = adapter->pdev;
 	int err;
 
-	igb_set_interrupt_capability(adapter);
+	err = igb_set_interrupt_capability(adapter);
+	if (err)
+		return err;
 
 	err = igb_alloc_q_vectors(adapter);
 	if (err) {
@@ -1530,7 +1536,9 @@ void igb_down(struct igb_adapter *adapter)
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -1683,7 +1691,7 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_open = igb_open,
 	.ndo_stop = igb_close,
 	.ndo_start_xmit = igb_xmit_frame_adv,
-	.ndo_get_stats = igb_get_stats,
+	.ndo_get_stats64 = igb_get_stats64,
 	.ndo_set_rx_mode = igb_set_rx_mode,
 	.ndo_set_multicast_list = igb_set_rx_mode,
 	.ndo_set_mac_address = igb_set_mac,
@@ -1856,8 +1864,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
 	netdev->vlan_features |= NETIF_F_SG;
 
-	if (pci_using_dac)
+	if (pci_using_dac) {
 		netdev->features |= NETIF_F_HIGHDMA;
+		netdev->vlan_features |= NETIF_F_HIGHDMA;
+	}
 
 	if (hw->mac.type >= e1000_82576)
 		netdev->features |= NETIF_F_SCTP_CSUM;
@@ -1888,9 +1898,9 @@ static int __devinit igb_probe(struct pci_dev *pdev,
 		goto err_eeprom;
 	}
 
-	setup_timer(&adapter->watchdog_timer, &igb_watchdog,
+	setup_timer(&adapter->watchdog_timer, igb_watchdog,
 	            (unsigned long) adapter);
-	setup_timer(&adapter->phy_info_timer, &igb_update_phy_info,
+	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
 	            (unsigned long) adapter);
 
 	INIT_WORK(&adapter->reset_task, igb_reset_task);
@@ -2268,6 +2278,7 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
 	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
 	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
 
+	spin_lock_init(&adapter->stats64_lock);
 #ifdef CONFIG_PCI_IOV
 	if (hw->mac.type == e1000_82576)
 		adapter->vfs_allocated_count = (max_vfs > 7) ? 7 : max_vfs;
@@ -3475,7 +3486,9 @@ static void igb_watchdog_task(struct work_struct *work)
 		}
 	}
 
-	igb_update_stats(adapter);
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	spin_unlock(&adapter->stats64_lock);
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *tx_ring = adapter->tx_ring[i];
@@ -3542,6 +3555,8 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 	int new_val = q_vector->itr_val;
 	int avg_wire_size = 0;
 	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *ring;
+	unsigned int packets;
 
 	/* For non-gigabit speeds, just fix the interrupt rate at 4000
 	 * ints/sec - ITR timer value of 120 ticks.
@@ -3551,16 +3566,21 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
 		goto set_itr_val;
 	}
 
-	if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->rx_ring;
-		avg_wire_size = ring->total_bytes / ring->total_packets;
+	ring = q_vector->rx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = ring->total_bytes / packets;
 	}
 
-	if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
-		struct igb_ring *ring = q_vector->tx_ring;
-		avg_wire_size = max_t(u32, avg_wire_size,
-				      (ring->total_bytes /
-				       ring->total_packets));
+	ring = q_vector->tx_ring;
+	if (ring) {
+		packets = ACCESS_ONCE(ring->total_packets);
+
+		if (packets)
+			avg_wire_size = max_t(u32, avg_wire_size,
+					      ring->total_bytes / packets);
 	}
 
 	/* if avg_wire_size isn't set no work was done */
@@ -3954,7 +3974,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
 	}
 
 	tx_ring->buffer_info[i].skb = skb;
-	tx_ring->buffer_info[i].shtx = skb_shinfo(skb)->tx_flags;
+	tx_ring->buffer_info[i].tx_flags = skb_shinfo(skb)->tx_flags;
 	/* multiply data chunks by size of headers */
 	tx_ring->buffer_info[i].bytecount = ((gso_segs - 1) * hlen) + skb->len;
 	tx_ring->buffer_info[i].gso_segs = gso_segs;
@@ -4069,7 +4089,11 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 
 	/* A reprieve! */
 	netif_wake_subqueue(netdev, tx_ring->queue_index);
-	tx_ring->tx_stats.restart_queue++;
+
+	u64_stats_update_begin(&tx_ring->tx_syncp2);
+	tx_ring->tx_stats.restart_queue2++;
+	u64_stats_update_end(&tx_ring->tx_syncp2);
+
 	return 0;
 }
 
@@ -4088,7 +4112,6 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	u32 tx_flags = 0;
 	u16 first;
 	u8 hdr_len = 0;
-	union skb_shared_tx *shtx = skb_tx(skb);
 
 	/* need: 1 descriptor per page,
 	 *       + 2 desc gap to keep tail from touching head,
@@ -4100,12 +4123,12 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 		return NETDEV_TX_BUSY;
 	}
 
-	if (unlikely(shtx->hardware)) {
-		shtx->in_progress = 1;
+	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
+		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 		tx_flags |= IGB_TX_FLAGS_TSTAMP;
 	}
 
-	if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
+	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= IGB_TX_FLAGS_VLAN;
 		tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
 	}
@@ -4207,16 +4230,22 @@ static void igb_reset_task(struct work_struct *work)
 }
 
 /**
- * igb_get_stats - Get System Network Statistics
+ * igb_get_stats64 - Get System Network Statistics
  * @netdev: network interface device structure
+ * @stats: rtnl_link_stats64 pointer
  *
- * Returns the address of the device statistics structure.
- * The statistics are actually updated from the timer callback.
  **/
-static struct net_device_stats *igb_get_stats(struct net_device *netdev)
+static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *netdev,
+						 struct rtnl_link_stats64 *stats)
 {
-	/* only return the current stats */
-	return &netdev->stats;
+	struct igb_adapter *adapter = netdev_priv(netdev);
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter, &adapter->stats64);
+	memcpy(stats, &adapter->stats64, sizeof(*stats));
+	spin_unlock(&adapter->stats64_lock);
+
+	return stats;
 }
 
 /**
@@ -4298,15 +4327,17 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
  * @adapter: board private structure
  **/
 
-void igb_update_stats(struct igb_adapter *adapter)
+void igb_update_stats(struct igb_adapter *adapter,
+		      struct rtnl_link_stats64 *net_stats)
 {
-	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
 	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
+	unsigned int start;
+	u64 _bytes, _packets;
 
 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
 
@@ -4324,10 +4355,17 @@ void igb_update_stats(struct igb_adapter *adapter)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
 		struct igb_ring *ring = adapter->rx_ring[i];
+
 		ring->rx_stats.drops += rqdpc_tmp;
 		net_stats->rx_fifo_errors += rqdpc_tmp;
-		bytes += ring->rx_stats.bytes;
-		packets += ring->rx_stats.packets;
+
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->rx_syncp);
+			_bytes = ring->rx_stats.bytes;
+			_packets = ring->rx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->rx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 
 	net_stats->rx_bytes = bytes;
@@ -4337,8 +4375,13 @@ void igb_update_stats(struct igb_adapter *adapter)
 	packets = 0;
 	for (i = 0; i < adapter->num_tx_queues; i++) {
 		struct igb_ring *ring = adapter->tx_ring[i];
-		bytes += ring->tx_stats.bytes;
-		packets += ring->tx_stats.packets;
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->tx_syncp);
+			_bytes = ring->tx_stats.bytes;
+			_packets = ring->tx_stats.packets;
+		} while (u64_stats_fetch_retry_bh(&ring->tx_syncp, start));
+		bytes += _bytes;
+		packets += _packets;
 	}
 	net_stats->tx_bytes = bytes;
 	net_stats->tx_packets = packets;
@@ -4660,12 +4703,13 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
 	u32 vmolr = rd32(E1000_VMOLR(vf));
 	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
 
-	vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
+	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
 			    IGB_VF_FLAG_MULTI_PROMISC);
 	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
 
 	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
 		vmolr |= E1000_VMOLR_MPME;
+		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
 		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
 	} else {
 		/*
@@ -5319,7 +5363,7 @@ static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct igb_buffer *bu
 	u64 regval;
 
 	/* if skb does not support hw timestamp or TX stamp not valid exit */
-	if (likely(!buffer_info->shtx.hardware) ||
+	if (likely(!(buffer_info->tx_flags & SKBTX_HW_TSTAMP)) ||
 	    !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
 		return;
 
@@ -5389,7 +5433,10 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
 		    !(test_bit(__IGB_DOWN, &adapter->state))) {
 			netif_wake_subqueue(netdev, tx_ring->queue_index);
+
+			u64_stats_update_begin(&tx_ring->tx_syncp);
 			tx_ring->tx_stats.restart_queue++;
+			u64_stats_update_end(&tx_ring->tx_syncp);
 		}
 	}
 
@@ -5429,9 +5476,11 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 	}
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
+	u64_stats_update_begin(&tx_ring->tx_syncp);
 	tx_ring->tx_stats.bytes += total_bytes;
 	tx_ring->tx_stats.packets += total_packets;
-	return (count < tx_ring->count);
+	u64_stats_update_end(&tx_ring->tx_syncp);
+	return count < tx_ring->count;
 }
 
 /**
@@ -5456,7 +5505,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
 static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
-	skb->ip_summed = CHECKSUM_NONE;
+	skb_checksum_none_assert(skb);
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
 	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
@@ -5472,9 +5521,11 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
 		 * packets, (aka let the stack check the crc32c)
 		 */
 		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
+		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
-
+			u64_stats_update_end(&ring->rx_syncp);
+		}
 		/* let the stack verify checksum errors */
 		return;
 	}
@@ -5500,7 +5551,7 @@ static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 	 * values must belong to this one here and therefore we don't need to
 	 * compare any of the additional attributes stored for it.
 	 *
-	 * If nothing went wrong, then it should have a skb_shared_tx that we
+	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
 	if (staterr & E1000_RXDADV_STAT_TSIP) {
@@ -5661,8 +5712,10 @@ next_desc:
 
 	rx_ring->total_packets += total_packets;
 	rx_ring->total_bytes += total_bytes;
+	u64_stats_update_begin(&rx_ring->rx_syncp);
 	rx_ring->rx_stats.packets += total_packets;
 	rx_ring->rx_stats.bytes += total_bytes;
+	u64_stats_update_end(&rx_ring->rx_syncp);
 	return cleaned;
 }
 
@@ -5690,8 +5743,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
 			if (!buffer_info->page) {
 				buffer_info->page = netdev_alloc_page(netdev);
-				if (!buffer_info->page) {
+				if (unlikely(!buffer_info->page)) {
+					u64_stats_update_begin(&rx_ring->rx_syncp);
 					rx_ring->rx_stats.alloc_failed++;
+					u64_stats_update_end(&rx_ring->rx_syncp);
 					goto no_buffers;
 				}
 				buffer_info->page_offset = 0;
@@ -5706,7 +5761,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->page_dma)) {
 				buffer_info->page_dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -5714,8 +5771,10 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 		skb = buffer_info->skb;
 		if (!skb) {
 			skb = netdev_alloc_skb_ip_align(netdev, bufsz);
-			if (!skb) {
+			if (unlikely(!skb)) {
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 
@@ -5729,7 +5788,9 @@ void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
 			if (dma_mapping_error(rx_ring->dev,
 					      buffer_info->dma)) {
 				buffer_info->dma = 0;
+				u64_stats_update_begin(&rx_ring->rx_syncp);
 				rx_ring->rx_stats.alloc_failed++;
+				u64_stats_update_end(&rx_ring->rx_syncp);
 				goto no_buffers;
 			}
 		}
@@ -6092,7 +6153,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
 
 	if (adapter->vlgrp) {
 		u16 vid;
-		for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
 			if (!vlan_group_get_device(adapter->vlgrp, vid))
 				continue;
 			igb_vlan_rx_add_vid(adapter->netdev, vid);
@@ -6107,6 +6168,13 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
 
 	mac->autoneg = 0;
 
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_internal_serdes) &&
+	    spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
 	switch (spddplx) {
 	case SPEED_10 + DUPLEX_HALF:
 		mac->forced_speed_duplex = ADVERTISE_10_HALF;