Diffstat (limited to 'drivers/net/igb/igb_ethtool.c')
 drivers/net/igb/igb_ethtool.c | 838 ++++++++++++++++++++++++------------------
 1 file changed, 455 insertions(+), 383 deletions(-)
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index b243ed3b0c36..743038490104 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -35,6 +35,7 @@
 #include <linux/if_ether.h>
 #include <linux/ethtool.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include "igb.h"
 
@@ -44,78 +45,94 @@ struct igb_stats {
 	int stat_offset;
 };
 
-#define IGB_STAT(m)	FIELD_SIZEOF(struct igb_adapter, m), \
-		      offsetof(struct igb_adapter, m)
+#define IGB_STAT(_name, _stat) { \
+	.stat_string = _name, \
+	.sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
+	.stat_offset = offsetof(struct igb_adapter, _stat) \
+}
 static const struct igb_stats igb_gstrings_stats[] = {
-	{ "rx_packets", IGB_STAT(stats.gprc) },
-	{ "tx_packets", IGB_STAT(stats.gptc) },
-	{ "rx_bytes", IGB_STAT(stats.gorc) },
-	{ "tx_bytes", IGB_STAT(stats.gotc) },
-	{ "rx_broadcast", IGB_STAT(stats.bprc) },
-	{ "tx_broadcast", IGB_STAT(stats.bptc) },
-	{ "rx_multicast", IGB_STAT(stats.mprc) },
-	{ "tx_multicast", IGB_STAT(stats.mptc) },
-	{ "rx_errors", IGB_STAT(net_stats.rx_errors) },
-	{ "tx_errors", IGB_STAT(net_stats.tx_errors) },
-	{ "tx_dropped", IGB_STAT(net_stats.tx_dropped) },
-	{ "multicast", IGB_STAT(stats.mprc) },
-	{ "collisions", IGB_STAT(stats.colc) },
-	{ "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) },
-	{ "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) },
-	{ "rx_crc_errors", IGB_STAT(stats.crcerrs) },
-	{ "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) },
-	{ "rx_no_buffer_count", IGB_STAT(stats.rnbc) },
-	{ "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) },
-	{ "rx_missed_errors", IGB_STAT(stats.mpc) },
-	{ "tx_aborted_errors", IGB_STAT(stats.ecol) },
-	{ "tx_carrier_errors", IGB_STAT(stats.tncrs) },
-	{ "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) },
-	{ "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) },
-	{ "tx_window_errors", IGB_STAT(stats.latecol) },
-	{ "tx_abort_late_coll", IGB_STAT(stats.latecol) },
-	{ "tx_deferred_ok", IGB_STAT(stats.dc) },
-	{ "tx_single_coll_ok", IGB_STAT(stats.scc) },
-	{ "tx_multi_coll_ok", IGB_STAT(stats.mcc) },
-	{ "tx_timeout_count", IGB_STAT(tx_timeout_count) },
-	{ "tx_restart_queue", IGB_STAT(restart_queue) },
-	{ "rx_long_length_errors", IGB_STAT(stats.roc) },
-	{ "rx_short_length_errors", IGB_STAT(stats.ruc) },
-	{ "rx_align_errors", IGB_STAT(stats.algnerrc) },
-	{ "tx_tcp_seg_good", IGB_STAT(stats.tsctc) },
-	{ "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) },
-	{ "rx_flow_control_xon", IGB_STAT(stats.xonrxc) },
-	{ "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) },
-	{ "tx_flow_control_xon", IGB_STAT(stats.xontxc) },
-	{ "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) },
-	{ "rx_long_byte_count", IGB_STAT(stats.gorc) },
-	{ "rx_csum_offload_good", IGB_STAT(hw_csum_good) },
-	{ "rx_csum_offload_errors", IGB_STAT(hw_csum_err) },
-	{ "tx_dma_out_of_sync", IGB_STAT(stats.doosync) },
-	{ "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) },
-	{ "tx_smbus", IGB_STAT(stats.mgptc) },
-	{ "rx_smbus", IGB_STAT(stats.mgprc) },
-	{ "dropped_smbus", IGB_STAT(stats.mgpdc) },
+	IGB_STAT("rx_packets", stats.gprc),
+	IGB_STAT("tx_packets", stats.gptc),
+	IGB_STAT("rx_bytes", stats.gorc),
+	IGB_STAT("tx_bytes", stats.gotc),
+	IGB_STAT("rx_broadcast", stats.bprc),
+	IGB_STAT("tx_broadcast", stats.bptc),
+	IGB_STAT("rx_multicast", stats.mprc),
+	IGB_STAT("tx_multicast", stats.mptc),
+	IGB_STAT("multicast", stats.mprc),
+	IGB_STAT("collisions", stats.colc),
+	IGB_STAT("rx_crc_errors", stats.crcerrs),
+	IGB_STAT("rx_no_buffer_count", stats.rnbc),
+	IGB_STAT("rx_missed_errors", stats.mpc),
+	IGB_STAT("tx_aborted_errors", stats.ecol),
+	IGB_STAT("tx_carrier_errors", stats.tncrs),
+	IGB_STAT("tx_window_errors", stats.latecol),
+	IGB_STAT("tx_abort_late_coll", stats.latecol),
+	IGB_STAT("tx_deferred_ok", stats.dc),
+	IGB_STAT("tx_single_coll_ok", stats.scc),
+	IGB_STAT("tx_multi_coll_ok", stats.mcc),
+	IGB_STAT("tx_timeout_count", tx_timeout_count),
+	IGB_STAT("rx_long_length_errors", stats.roc),
+	IGB_STAT("rx_short_length_errors", stats.ruc),
+	IGB_STAT("rx_align_errors", stats.algnerrc),
+	IGB_STAT("tx_tcp_seg_good", stats.tsctc),
+	IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
+	IGB_STAT("rx_flow_control_xon", stats.xonrxc),
+	IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
+	IGB_STAT("tx_flow_control_xon", stats.xontxc),
+	IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
+	IGB_STAT("rx_long_byte_count", stats.gorc),
+	IGB_STAT("tx_dma_out_of_sync", stats.doosync),
+	IGB_STAT("tx_smbus", stats.mgptc),
+	IGB_STAT("rx_smbus", stats.mgprc),
+	IGB_STAT("dropped_smbus", stats.mgpdc),
+};
+
+#define IGB_NETDEV_STAT(_net_stat) { \
+	.stat_string = __stringify(_net_stat), \
+	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
+}
+static const struct igb_stats igb_gstrings_net_stats[] = {
+	IGB_NETDEV_STAT(rx_errors),
+	IGB_NETDEV_STAT(tx_errors),
+	IGB_NETDEV_STAT(tx_dropped),
+	IGB_NETDEV_STAT(rx_length_errors),
+	IGB_NETDEV_STAT(rx_over_errors),
+	IGB_NETDEV_STAT(rx_frame_errors),
+	IGB_NETDEV_STAT(rx_fifo_errors),
+	IGB_NETDEV_STAT(tx_fifo_errors),
+	IGB_NETDEV_STAT(tx_heartbeat_errors)
 };
 
-#define IGB_QUEUE_STATS_LEN \
-	(((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
-	  (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
-	 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
-	  (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
 #define IGB_GLOBAL_STATS_LEN	\
-	sizeof(igb_gstrings_stats) / sizeof(struct igb_stats)
-#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN)
+	(sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
+#define IGB_NETDEV_STATS_LEN	\
+	(sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
+#define IGB_RX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_rx_queue_stats) / sizeof(u64))
+#define IGB_TX_QUEUE_STATS_LEN \
+	(sizeof(struct igb_tx_queue_stats) / sizeof(u64))
+#define IGB_QUEUE_STATS_LEN \
+	((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
+	  IGB_RX_QUEUE_STATS_LEN) + \
+	 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
+	  IGB_TX_QUEUE_STATS_LEN))
+#define IGB_STATS_LEN \
+	(IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
+
 static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register test  (offline)", "Eeprom test    (offline)",
 	"Interrupt test (offline)", "Loopback test  (offline)",
 	"Link test   (on/offline)"
 };
-#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN
+#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
 
 static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
 
 	if (hw->phy.media_type == e1000_media_type_copper) {
 
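The reworked IGB_STAT and IGB_NETDEV_STAT macros record a name, a field size, and a byte offset per counter, so a single loop can walk the table and fetch every statistic without per-field code. A minimal userspace sketch of the same offsetof-based technique (struct and field names here are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-in for struct igb_adapter */
struct demo_adapter {
	uint64_t rx_packets;
	uint32_t tx_timeouts;
};

struct demo_stat {
	const char *name;
	size_t size;    /* sizeof the field, like FIELD_SIZEOF */
	size_t offset;  /* byte offset inside demo_adapter */
};

#define DEMO_STAT(_name, _field) {				\
	.name = _name,						\
	.size = sizeof(((struct demo_adapter *)0)->_field),	\
	.offset = offsetof(struct demo_adapter, _field)		\
}

static const struct demo_stat demo_stats[] = {
	DEMO_STAT("rx_packets", rx_packets),
	DEMO_STAT("tx_timeouts", tx_timeouts),
};

int main(void)
{
	struct demo_adapter a = { .rx_packets = 42, .tx_timeouts = 1 };
	size_t i;

	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++) {
		/* fetch through the recorded offset; width picks the cast */
		const char *p = (const char *)&a + demo_stats[i].offset;
		uint64_t val = (demo_stats[i].size == sizeof(uint64_t)) ?
			*(const uint64_t *)p : *(const uint32_t *)p;
		printf("%s: %llu\n", demo_stats[i].name,
		       (unsigned long long)val);
	}
	return 0;
}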
@@ -150,17 +167,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 
 	ecmd->transceiver = XCVR_INTERNAL;
 
-	if (rd32(E1000_STATUS) & E1000_STATUS_LU) {
+	status = rd32(E1000_STATUS);
 
-		adapter->hw.mac.ops.get_speed_and_duplex(hw,
-							 &adapter->link_speed,
-							 &adapter->link_duplex);
-		ecmd->speed = adapter->link_speed;
+	if (status & E1000_STATUS_LU) {
 
-		/* unfortunately FULL_DUPLEX != DUPLEX_FULL
-		 * and HALF_DUPLEX != DUPLEX_HALF */
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
 
-		if (adapter->link_duplex == FULL_DUPLEX)
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
 			ecmd->duplex = DUPLEX_FULL;
 		else
 			ecmd->duplex = DUPLEX_HALF;
@@ -215,6 +235,24 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 	return 0;
 }
 
+static u32 igb_get_link(struct net_device *netdev)
+{
+	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	/*
+	 * If the link is not reported up to netdev, interrupts are disabled,
+	 * and so the physical link state may have changed since we last
+	 * looked. Set get_link_status to make sure that the true link
+	 * state is interrogated, rather than pulling a cached and possibly
+	 * stale link state from the driver.
+	 */
+	if (!netif_carrier_ok(netdev))
+		mac->get_link_status = 1;
+
+	return igb_has_link(adapter);
+}
+
 static void igb_get_pauseparam(struct net_device *netdev,
 			       struct ethtool_pauseparam *pause)
 {
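igb_get_link replaces the generic ethtool_op_get_link so that a down interface, whose link interrupts are off, forces a fresh PHY query instead of returning a stale cached carrier state. A standalone sketch of that invalidate-then-read pattern (all names here are illustrative, not driver code):

#include <stdbool.h>
#include <stdio.h>

struct link_state {
	bool carrier_ok;      /* what the stack currently believes */
	bool get_link_status; /* when set, force a fresh PHY read */
};

static bool read_phy_link(void)
{
	return true; /* pretend the PHY reports link up */
}

static bool demo_has_link(struct link_state *ls)
{
	if (ls->get_link_status) {
		ls->carrier_ok = read_phy_link();
		ls->get_link_status = false;
	}
	return ls->carrier_ok;
}

int main(void)
{
	struct link_state ls = { .carrier_ok = false };

	/* with the update path quiescent the cache may be stale,
	 * so request a fresh read before reporting */
	if (!ls.carrier_ok)
		ls.get_link_status = true;

	printf("link %s\n", demo_has_link(&ls) ? "up" : "down");
	return 0;
}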
@@ -251,8 +289,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
 		if (netif_running(adapter->netdev)) {
 			igb_down(adapter);
 			igb_up(adapter);
-		} else
+		} else {
 			igb_reset(adapter);
+		}
 	} else {
 		if (pause->rx_pause && pause->tx_pause)
 			hw->fc.requested_mode = e1000_fc_full;
@@ -276,17 +315,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
 static u32 igb_get_rx_csum(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED);
+	return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
 }
 
 static int igb_set_rx_csum(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	int i;
 
-	if (data)
-		adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED;
-	else
-		adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		if (data)
+			adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
+		else
+			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
+	}
 
 	return 0;
 }
@@ -302,7 +344,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
 
 	if (data) {
 		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == e1000_82576)
+		if (adapter->hw.mac.type >= e1000_82576)
 			netdev->features |= NETIF_F_SCTP_CSUM;
 	} else {
 		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -496,19 +538,10 @@ static void igb_get_regs(struct net_device *netdev,
 	regs_buff[119] = adapter->stats.scvpc;
 	regs_buff[120] = adapter->stats.hrmpc;
 
-	/* These should probably be added to e1000_regs.h instead */
-	#define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
-	#define E1000_IP4AT_REG(_i)   (0x05840 + ((_i) * 8))
-	#define E1000_IP6AT_REG(_i)   (0x05880 + ((_i) * 4))
-	#define E1000_WUPM_REG(_i)    (0x05A00 + ((_i) * 4))
-	#define E1000_FFMT_REG(_i)    (0x09000 + ((_i) * 8))
-	#define E1000_FFVT_REG(_i)    (0x09800 + ((_i) * 8))
-	#define E1000_FFLT_REG(_i)    (0x05F00 + ((_i) * 8))
-
 	for (i = 0; i < 4; i++)
 		regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
 	for (i = 0; i < 4; i++)
-		regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i));
+		regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
 	for (i = 0; i < 4; i++)
 		regs_buff[129 + i] = rd32(E1000_RDBAL(i));
 	for (i = 0; i < 4; i++)
@@ -733,17 +766,17 @@ static int igb_set_ringparam(struct net_device *netdev,
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct igb_ring *temp_ring;
 	int i, err = 0;
-	u32 new_rx_count, new_tx_count;
+	u16 new_rx_count, new_tx_count;
 
 	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
 		return -EINVAL;
 
-	new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD);
-	new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD);
+	new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
+	new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
 	new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
 
-	new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD);
-	new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD);
+	new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
+	new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
 	new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
 
 	if ((new_tx_count == adapter->tx_ring_count) &&
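Taking min_t against the upper bound first and max_t against the lower bound second means an out-of-range request such as 0 is pulled up to the minimum rather than wrapping in the narrower u16, and ALIGN then rounds up to the descriptor multiple. A self-contained sketch of the same ordering with illustrative bounds:

#include <stdint.h>
#include <stdio.h>

/* illustrative bounds; the real IGB_MIN/MAX_RXD live in igb.h */
#define DEMO_MIN_RXD     80
#define DEMO_MAX_RXD     4096
#define DEMO_RX_MULTIPLE 8

/* round up to the next multiple, like the kernel's ALIGN() */
static uint16_t demo_align(uint16_t x, uint16_t a)
{
	return (uint16_t)((x + a - 1) / a * a);
}

static uint16_t clamp_ring_size(uint32_t requested)
{
	uint16_t count;

	/* min against the max first, then max against the min */
	count = requested < DEMO_MAX_RXD ? (uint16_t)requested : DEMO_MAX_RXD;
	if (count < DEMO_MIN_RXD)
		count = DEMO_MIN_RXD;
	return demo_align(count, DEMO_RX_MULTIPLE);
}

int main(void)
{
	printf("%u\n", clamp_ring_size(0));       /* -> 80   */
	printf("%u\n", clamp_ring_size(1000));    /* -> 1000 */
	printf("%u\n", clamp_ring_size(1000000)); /* -> 4096 */
	return 0;
}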
@@ -757,9 +790,9 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i].count = new_tx_count;
+			adapter->tx_ring[i]->count = new_tx_count;
 		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i].count = new_rx_count;
+			adapter->rx_ring[i]->count = new_rx_count;
 		adapter->tx_ring_count = new_tx_count;
 		adapter->rx_ring_count = new_rx_count;
 		goto clear_reset;
@@ -783,12 +816,12 @@ static int igb_set_ringparam(struct net_device *netdev,
 	 * to the tx and rx ring structs.
 	 */
 	if (new_tx_count != adapter->tx_ring_count) {
-		memcpy(temp_ring, adapter->tx_ring,
-		       adapter->num_tx_queues * sizeof(struct igb_ring));
-
 		for (i = 0; i < adapter->num_tx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->tx_ring[i],
+			       sizeof(struct igb_ring));
+
 			temp_ring[i].count = new_tx_count;
-			err = igb_setup_tx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_tx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -798,22 +831,23 @@ static int igb_set_ringparam(struct net_device *netdev,
 			}
 		}
 
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			igb_free_tx_resources(&adapter->tx_ring[i]);
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			igb_free_tx_resources(adapter->tx_ring[i]);
 
-		memcpy(adapter->tx_ring, temp_ring,
-		       adapter->num_tx_queues * sizeof(struct igb_ring));
+			memcpy(adapter->tx_ring[i], &temp_ring[i],
+			       sizeof(struct igb_ring));
+		}
 
 		adapter->tx_ring_count = new_tx_count;
 	}
 
-	if (new_rx_count != adapter->rx_ring->count) {
-		memcpy(temp_ring, adapter->rx_ring,
-		       adapter->num_rx_queues * sizeof(struct igb_ring));
-
+	if (new_rx_count != adapter->rx_ring_count) {
 		for (i = 0; i < adapter->num_rx_queues; i++) {
+			memcpy(&temp_ring[i], adapter->rx_ring[i],
+			       sizeof(struct igb_ring));
+
 			temp_ring[i].count = new_rx_count;
-			err = igb_setup_rx_resources(adapter, &temp_ring[i]);
+			err = igb_setup_rx_resources(&temp_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
@@ -824,11 +858,12 @@ static int igb_set_ringparam(struct net_device *netdev,
 
 		}
 
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			igb_free_rx_resources(&adapter->rx_ring[i]);
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			igb_free_rx_resources(adapter->rx_ring[i]);
 
-		memcpy(adapter->rx_ring, temp_ring,
-		       adapter->num_rx_queues * sizeof(struct igb_ring));
+			memcpy(adapter->rx_ring[i], &temp_ring[i],
+			       sizeof(struct igb_ring));
+		}
 
 		adapter->rx_ring_count = new_rx_count;
 	}
@@ -867,6 +902,49 @@ struct igb_reg_test {
 #define TABLE64_TEST_LO	5
 #define TABLE64_TEST_HI	6
 
+/* 82580 reg test */
+static struct igb_reg_test reg_test_82580[] = {
+	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_FCAH,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_FCT,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
+	{ E1000_VET,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_RDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_RDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	/* RDH is read-only for 82580, only test RDT. */
+	{ E1000_RDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_FCRTH,	   0x100, 1,  PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
+	{ E1000_FCTTV,	   0x100, 1,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TIPG,	   0x100, 1,  PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
+	{ E1000_TDBAL(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(0),  0x100, 4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(0),  0x100, 4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDBAL(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+	{ E1000_TDBAH(4),  0x40,  4,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_TDLEN(4),  0x40,  4,  PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
+	{ E1000_TDT(0),	   0x100, 4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_TDT(4),	   0x40,  4,  PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+	{ E1000_RCTL,	   0x100, 1,  SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+	{ E1000_TCTL,	   0x100, 1,  SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA,	   0, 16, TABLE64_TEST_HI,
+						0x83FFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 8, TABLE64_TEST_LO,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ E1000_RA2,	   0, 8, TABLE64_TEST_HI,
+						0x83FFFFFF, 0xFFFFFFFF },
+	{ E1000_MTA,	   0, 128, TABLE32_TEST,
+						0xFFFFFFFF, 0xFFFFFFFF },
+	{ 0, 0, 0, 0 }
+};
+
 /* 82576 reg test */
 static struct igb_reg_test reg_test_82576[] = {
 	{ E1000_FCAL,	   0x100, 1,  PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -944,7 +1022,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 {
 	struct e1000_hw *hw = &adapter->hw;
 	u32 pat, val;
-	u32 _test[] =
+	static const u32 _test[] =
 		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
 	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
 		wr32(reg, (_test[pat] & write));
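reg_pattern_test writes each pattern through the write mask and expects to read back pattern & write & mask; bits outside the mask are don't-cares, which is how read-only and reserved bits are tolerated. A standalone sketch of that check against a simulated register with 16 writable bits (names are illustrative):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg; /* simulated device register */

static void wr(uint32_t val) { fake_reg = val & 0x0000FFFF; }
static uint32_t rd(void)     { return fake_reg; }

static int pattern_test(uint32_t mask, uint32_t write)
{
	static const uint32_t pats[] =
		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
	size_t i;

	for (i = 0; i < sizeof(pats) / sizeof(pats[0]); i++) {
		wr(pats[i] & write);
		/* only bits inside the mask must survive the round trip */
		if ((rd() & mask) != (pats[i] & write & mask)) {
			fprintf(stderr, "mismatch on pattern 0x%08X\n",
				pats[i]);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	/* claim only the low 16 bits are implemented, as FCAH does */
	return pattern_test(0x0000FFFF, 0xFFFFFFFF);
}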
@@ -957,6 +1035,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
 			return 1;
 		}
 	}
+
 	return 0;
 }
 
@@ -974,6 +1053,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
 		*data = reg;
 		return 1;
 	}
+
 	return 0;
 }
 
@@ -996,14 +1076,18 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
 	u32 value, before, after;
 	u32 i, toggle;
 
-	toggle = 0x7FFFF3FF;
-
 	switch (adapter->hw.mac.type) {
+	case e1000_82580:
+		test = reg_test_82580;
+		toggle = 0x7FEFF3FF;
+		break;
 	case e1000_82576:
 		test = reg_test_82576;
+		toggle = 0x7FFFF3FF;
 		break;
 	default:
 		test = reg_test_82575;
+		toggle = 0x7FFFF3FF;
 		break;
 	}
 
@@ -1081,8 +1165,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 	/* Read and add up the contents of the EEPROM */
 	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
-		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp))
-		    < 0) {
+		if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
 			*data = 1;
 			break;
 		}
@@ -1098,8 +1181,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
 
 static irqreturn_t igb_test_intr(int irq, void *data)
 {
-	struct net_device *netdev = (struct net_device *) data;
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
 	struct e1000_hw *hw = &adapter->hw;
 
 	adapter->test_icr |= rd32(E1000_ICR);
@@ -1117,38 +1199,45 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
-	if (adapter->msix_entries)
-		/* NOTE: we don't test MSI-X interrupts here, yet */
-		return 0;
-
-	if (adapter->flags & IGB_FLAG_HAS_MSI) {
+	if (adapter->msix_entries) {
+		if (request_irq(adapter->msix_entries[0].vector,
+		                igb_test_intr, 0, netdev->name, adapter)) {
+			*data = 1;
+			return -1;
+		}
+	} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
 		shared_int = false;
-		if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) {
+		if (request_irq(irq,
+		                igb_test_intr, 0, netdev->name, adapter)) {
 			*data = 1;
 			return -1;
 		}
-	} else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
-				netdev->name, netdev)) {
+	} else if (!request_irq(irq, igb_test_intr, IRQF_PROBE_SHARED,
+				netdev->name, adapter)) {
 		shared_int = false;
-	} else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
-			       netdev->name, netdev)) {
+	} else if (request_irq(irq, igb_test_intr, IRQF_SHARED,
+			       netdev->name, adapter)) {
 		*data = 1;
 		return -1;
 	}
 	dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
 		 (shared_int ? "shared" : "unshared"));
+
 	/* Disable all the interrupts */
-	wr32(E1000_IMC, 0xFFFFFFFF);
+	wr32(E1000_IMC, ~0);
 	msleep(10);
 
 	/* Define all writable bits for ICS */
-	switch(hw->mac.type) {
+	switch (hw->mac.type) {
 	case e1000_82575:
 		ics_mask = 0x37F47EDD;
 		break;
 	case e1000_82576:
 		ics_mask = 0x77D4FBFD;
 		break;
+	case e1000_82580:
+		ics_mask = 0x77DCFED5;
+		break;
 	default:
 		ics_mask = 0x7FFFFFFF;
 		break;
@@ -1232,190 +1321,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
 	msleep(10);
 
 	/* Unhook test interrupt handler */
-	free_irq(irq, netdev);
+	if (adapter->msix_entries)
+		free_irq(adapter->msix_entries[0].vector, adapter);
+	else
+		free_irq(irq, adapter);
 
 	return *data;
 }
 
 static void igb_free_desc_rings(struct igb_adapter *adapter)
 {
-	struct igb_ring *tx_ring = &adapter->test_tx_ring;
-	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i;
-
-	if (tx_ring->desc && tx_ring->buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma, buf->length,
-						 PCI_DMA_TODEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
-			if (buf->dma)
-				pci_unmap_single(pdev, buf->dma,
-						 IGB_RXBUFFER_2048,
-						 PCI_DMA_FROMDEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
-				    tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
-				    rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->buffer_info);
-	tx_ring->buffer_info = NULL;
-	kfree(rx_ring->buffer_info);
-	rx_ring->buffer_info = NULL;
-
-	return;
+	igb_free_tx_resources(&adapter->test_tx_ring);
+	igb_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int igb_setup_desc_rings(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	struct igb_buffer *buffer_info;
-	u32 rctl;
-	int i, ret_val;
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IGB_DEFAULT_TXD;
+	tx_ring->pdev = adapter->pdev;
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->vfs_allocated_count;
 
-	if (!tx_ring->count)
-		tx_ring->count = IGB_DEFAULT_TXD;
-
-	tx_ring->buffer_info = kcalloc(tx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!tx_ring->buffer_info) {
+	if (igb_setup_tx_resources(tx_ring)) {
 		ret_val = 1;
 		goto err_nomem;
 	}
 
-	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
-					     &tx_ring->dma);
-	if (!tx_ring->desc) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	wr32(E1000_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
-	wr32(E1000_TDLEN(0),
-			tx_ring->count * sizeof(union e1000_adv_tx_desc));
-	wr32(E1000_TDH(0), 0);
-	wr32(E1000_TDT(0), 0);
-	wr32(E1000_TCTL,
-			E1000_TCTL_PSP | E1000_TCTL_EN |
-			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
-			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union e1000_adv_tx_desc *tx_desc;
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		buffer_info = &tx_ring->buffer_info[i];
-		buffer_info->skb = skb;
-		buffer_info->length = skb->len;
-		buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
-						  PCI_DMA_TODEVICE);
-		tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
-		tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
-					      E1000_ADVTXD_PAYLEN_SHIFT;
-		tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
-							  E1000_TXD_CMD_IFCS |
-							  E1000_TXD_CMD_RS |
-							  E1000_ADVTXD_DTYP_DATA |
-							  E1000_ADVTXD_DCMD_DEXT);
-	}
+	igb_setup_tctl(adapter);
+	igb_configure_tx_ring(adapter, tx_ring);
 
 	/* Setup Rx descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IGB_DEFAULT_RXD;
-
-	rx_ring->buffer_info = kcalloc(rx_ring->count,
-				       sizeof(struct igb_buffer),
-				       GFP_KERNEL);
-	if (!rx_ring->buffer_info) {
-		ret_val = 4;
+	rx_ring->count = IGB_DEFAULT_RXD;
+	rx_ring->pdev = adapter->pdev;
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
+	rx_ring->reg_idx = adapter->vfs_allocated_count;
+
+	if (igb_setup_rx_resources(rx_ring)) {
+		ret_val = 3;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-	rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
-					     &rx_ring->dma);
-	if (!rx_ring->desc) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+	/* set the default queue to queue 0 of PF */
+	wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
 
-	rctl = rd32(E1000_RCTL);
-	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
-	wr32(E1000_RDBAL(0),
-			((u64) rx_ring->dma & 0xFFFFFFFF));
-	wr32(E1000_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	wr32(E1000_RDLEN(0), rx_ring->size);
-	wr32(E1000_RDH(0), 0);
-	wr32(E1000_RDT(0), 0);
-	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
-	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
-		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
-	wr32(E1000_RCTL, rctl);
-	wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
-
-	for (i = 0; i < rx_ring->count; i++) {
-		union e1000_adv_rx_desc *rx_desc;
-		struct sk_buff *skb;
-
-		buffer_info = &rx_ring->buffer_info[i];
-		rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
-		skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
-				GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		buffer_info->skb = skb;
-		buffer_info->dma = pci_map_single(pdev, skb->data,
-						  IGB_RXBUFFER_2048,
-						  PCI_DMA_FROMDEVICE);
-		rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
-		memset(skb->data, 0x00, skb->len);
-	}
+	/* enable receive ring */
+	igb_setup_rctl(adapter);
+	igb_configure_rx_ring(adapter, rx_ring);
+
+	igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
 
 	return 0;
 
@@ -1449,6 +1409,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter)
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x9140);
 		/* autoneg off */
 		igb_write_phy_reg(hw, PHY_CONTROL, 0x8140);
+	} else if (hw->phy.type == e1000_phy_82580) {
+		/* enable MII loopback */
+		igb_write_phy_reg(hw, I82580_PHY_LBK_CTRL, 0x8041);
 	}
 
 	ctrl_reg = rd32(E1000_CTRL);
@@ -1491,7 +1454,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 	struct e1000_hw *hw = &adapter->hw;
 	u32 reg;
 
-	if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+	reg = rd32(E1000_CTRL_EXT);
+
+	/* use CTRL_EXT to identify link type as SGMII can appear as copper */
+	if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
 		reg = rd32(E1000_RCTL);
 		reg |= E1000_RCTL_LBM_TCVR;
 		wr32(E1000_RCTL, reg);
@@ -1522,11 +1488,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
 		wr32(E1000_PCS_LCTL, reg);
 
 		return 0;
-	} else if (hw->phy.media_type == e1000_media_type_copper) {
-		return igb_set_phy_loopback(adapter);
 	}
 
-	return 7;
+	return igb_set_phy_loopback(adapter);
 }
 
 static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1552,35 +1516,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
 				 unsigned int frame_size)
 {
 	memset(skb->data, 0xFF, frame_size);
-	frame_size &= ~1;
-	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
-	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
-	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+	frame_size /= 2;
+	memset(&skb->data[frame_size], 0xAA, frame_size - 1);
+	memset(&skb->data[frame_size + 10], 0xBE, 1);
+	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
 static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
 {
-	frame_size &= ~1;
-	if (*(skb->data + 3) == 0xFF)
-		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
-		    (*(skb->data + frame_size / 2 + 12) == 0xAF))
-			return 0;
+	frame_size /= 2;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size + 10) == 0xBE) &&
+		    (*(skb->data + frame_size + 12) == 0xAF)) {
+			return 0;
+		}
+	}
 	return 13;
 }
 
+static int igb_clean_test_rings(struct igb_ring *rx_ring,
+                                struct igb_ring *tx_ring,
+                                unsigned int size)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_buffer *buffer_info;
+	int rx_ntc, tx_ntc, count = 0;
+	u32 staterr;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		/* check rx buffer */
+		buffer_info = &rx_ring->buffer_info[rx_ntc];
+
+		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
+		pci_unmap_single(rx_ring->pdev,
+		                 buffer_info->dma,
+		                 rx_ring->rx_buffer_len,
+		                 PCI_DMA_FROMDEVICE);
+		buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!igb_check_lbtest_frame(buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on tx side */
+		buffer_info = &tx_ring->buffer_info[tx_ntc];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+
+		/* increment rx/tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	igb_alloc_rx_buffers_adv(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int igb_run_loopback_test(struct igb_adapter *adapter)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct igb_ring *tx_ring = &adapter->test_tx_ring;
 	struct igb_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt;
-	int ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	wr32(E1000_RDT(0), rx_ring->count - 1);
-
-	/* Calculate the loop count based on the largest descriptor ring
+	/* place data into test skb */
+	igb_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
+
+	/*
+	 * Calculate the loop count based on the largest descriptor ring
 	 * The idea is to wrap the largest ring a number of times using 64
 	 * send/receive pairs during each loop
 	 */
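The loopback frame is half 0xFF and half 0xAA, with 0xBE/0xAF markers planted at fixed offsets past the midpoint, so the checker can validate a received copy by inspecting just three bytes. The same layout on a plain buffer, as a runnable sketch:

#include <stdio.h>
#include <string.h>

static void create_lbtest_frame(unsigned char *data, unsigned int frame_size)
{
	memset(data, 0xFF, frame_size);
	frame_size /= 2;
	memset(&data[frame_size], 0xAA, frame_size - 1);
	data[frame_size + 10] = 0xBE;
	data[frame_size + 12] = 0xAF;
}

static int check_lbtest_frame(const unsigned char *data,
			      unsigned int frame_size)
{
	frame_size /= 2;
	if (data[3] == 0xFF &&
	    data[frame_size + 10] == 0xBE &&
	    data[frame_size + 12] == 0xAF)
		return 0;
	return 13; /* same miscompare code the driver uses */
}

int main(void)
{
	unsigned char buf[1024];

	create_lbtest_frame(buf, sizeof(buf));
	printf("frame check: %d\n", check_lbtest_frame(buf, sizeof(buf)));
	return 0;
}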
@@ -1590,50 +1618,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) { /* loop count loop */
-		for (i = 0; i < 64; i++) { /* send the packets */
-			igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
-						1024);
-			pci_dma_sync_single_for_device(pdev,
-				tx_ring->buffer_info[k].dma,
-				tx_ring->buffer_info[k].length,
-				PCI_DMA_TODEVICE);
-			k++;
-			if (k == tx_ring->count)
-				k = 0;
-		}
-		wr32(E1000_TDT(0), k);
-		msleep(200);
-		time = jiffies; /* set the start time for the receive */
+		/* reset count of good packets */
 		good_cnt = 0;
-		do { /* receive the sent packets */
-			pci_dma_sync_single_for_cpu(pdev,
-					rx_ring->buffer_info[l].dma,
-					IGB_RXBUFFER_2048,
-					PCI_DMA_FROMDEVICE);
-
-			ret_val = igb_check_lbtest_frame(
-					rx_ring->buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue*/
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			l++;
-			if (l == rx_ring->count)
-				l = 0;
-			/* time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			ret_val = 13; /* ret_val is the same as mis-compare */
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			ret_val = 14; /* error code for time out error */
+
+		/* allow 200 milliseconds for packets to go from tx to rx */
+		msleep(200);
+
+		good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	} /* end loop count loop */
+
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
@@ -1686,8 +1700,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
 		if (hw->mac.autoneg)
 			msleep(4000);
 
-		if (!(rd32(E1000_STATUS) &
-		      E1000_STATUS_LU))
+		if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
 			*data = 1;
 	}
 	return *data;
@@ -1712,6 +1725,9 @@ static void igb_diag_test(struct net_device *netdev,
 
 		dev_info(&adapter->pdev->dev, "offline testing starting\n");
 
+		/* power up link for link test */
+		igb_power_up_link(adapter);
+
 		/* Link test performed before hardware reset so autoneg doesn't
 		 * interfere with test result */
 		if (igb_link_test(adapter, &data[4]))
@@ -1735,6 +1751,8 @@ static void igb_diag_test(struct net_device *netdev,
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 		igb_reset(adapter);
+		/* power up link for loopback test */
+		igb_power_up_link(adapter);
 		if (igb_loopback_test(adapter, &data[3]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
@@ -1753,9 +1771,14 @@ static void igb_diag_test(struct net_device *netdev,
 			dev_open(netdev);
 	} else {
 		dev_info(&adapter->pdev->dev, "online testing starting\n");
-		/* Online tests */
-		if (igb_link_test(adapter, &data[4]))
-			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* PHY is powered down when interface is down */
+		if (!netif_carrier_ok(netdev)) {
+			data[4] = 0;
+		} else {
+			if (igb_link_test(adapter, &data[4]))
+				eth_test->flags |= ETH_TEST_FL_FAILED;
+		}
 
 		/* Online tests aren't run; pass by default */
 		data[0] = 0;
@@ -1791,6 +1814,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		retval = 0;
 		break;
 	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
 		/* quad port adapters only support WoL on port A */
 		if (!(adapter->flags & IGB_FLAG_QUAD_PORT_A)) {
 			wol->supported = 0;
@@ -1803,7 +1827,7 @@ static int igb_wol_exclusion(struct igb_adapter *adapter,
 		/* dual port cards only support WoL on port A from now on
 		 * unless it was enabled in the eeprom for port B
 		 * so exclude FUNC_1 ports from having WoL enabled */
-		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+		if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) &&
 		    !adapter->eeprom_wol) {
 			wol->supported = 0;
 			break;
@@ -1820,7 +1844,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
 	wol->supported = WAKE_UCAST | WAKE_MCAST |
-			 WAKE_BCAST | WAKE_MAGIC;
+			 WAKE_BCAST | WAKE_MAGIC |
+			 WAKE_PHY;
 	wol->wolopts = 0;
 
 	/* this function will set ->supported = 0 and return 1 if wol is not
@@ -1843,15 +1868,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		wol->wolopts |= WAKE_BCAST;
 	if (adapter->wol & E1000_WUFC_MAG)
 		wol->wolopts |= WAKE_MAGIC;
-
-	return;
+	if (adapter->wol & E1000_WUFC_LNKC)
+		wol->wolopts |= WAKE_PHY;
 }
 
 static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
 		return -EOPNOTSUPP;
 
 	if (igb_wol_exclusion(adapter, wol) ||
@@ -1869,7 +1894,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 		adapter->wol |= E1000_WUFC_BC;
 	if (wol->wolopts & WAKE_MAGIC)
 		adapter->wol |= E1000_WUFC_MAG;
-
+	if (wol->wolopts & WAKE_PHY)
+		adapter->wol |= E1000_WUFC_LNKC;
 	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
 
 	return 0;
@@ -1882,12 +1908,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long timeout;
 
-	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
-		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
+	timeout = data * 1000;
+
+	/*
+	 * msleep_interruptable only accepts unsigned int so we are limited
+	 * in how long a duration we can wait
+	 */
+	if (!timeout || timeout > UINT_MAX)
+		timeout = UINT_MAX;
 
 	igb_blink_led(hw);
-	msleep_interruptible(data * 1000);
+	msleep_interruptible(timeout);
 
 	igb_led_off(hw);
 	clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1900,7 +1933,6 @@ static int igb_set_coalesce(struct net_device *netdev,
 			    struct ethtool_coalesce *ec)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
 	int i;
 
 	if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1909,17 +1941,39 @@ static int igb_set_coalesce(struct net_device *netdev,
 	    (ec->rx_coalesce_usecs == 2))
 		return -EINVAL;
 
+	if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
+	    ((ec->tx_coalesce_usecs > 3) &&
+	     (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
+	    (ec->tx_coalesce_usecs == 2))
+		return -EINVAL;
+
+	if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+		return -EINVAL;
+
 	/* convert to rate of irq's per second */
-	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) {
-		adapter->itr_setting = ec->rx_coalesce_usecs;
-		adapter->itr = IGB_START_ITR;
-	} else {
-		adapter->itr_setting = ec->rx_coalesce_usecs << 2;
-		adapter->itr = adapter->itr_setting;
-	}
+	if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs;
+	else
+		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
 
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		wr32(adapter->rx_ring[i].itr_register, adapter->itr);
+	/* convert to rate of irq's per second */
+	if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
+		adapter->tx_itr_setting = adapter->rx_itr_setting;
+	else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs;
+	else
+		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+		if (q_vector->rx_ring)
+			q_vector->itr_val = adapter->rx_itr_setting;
+		else
+			q_vector->itr_val = adapter->tx_itr_setting;
+		if (q_vector->itr_val && q_vector->itr_val <= 3)
+			q_vector->itr_val = IGB_START_ITR;
+		q_vector->set_itr = 1;
+	}
 
 	return 0;
 }
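Settings of 1 to 3 microseconds act as mode selectors, so anything larger is stored shifted left by two (four stored units per microsecond, which appears to match the hardware's quarter-microsecond ITR granularity) and shifted back on read. A sketch of the round trip:

#include <stdio.h>

/* values <= 3 select special adaptive/low-latency modes and are
 * stored verbatim; larger values are kept in 0.25 us units */
static unsigned int usecs_to_itr_setting(unsigned int usecs)
{
	if (usecs && usecs <= 3)
		return usecs;
	return usecs << 2; /* 4 stored units per microsecond */
}

static unsigned int itr_setting_to_usecs(unsigned int setting)
{
	if (setting <= 3)
		return setting;
	return setting >> 2;
}

int main(void)
{
	unsigned int usecs = 162; /* roughly IGB_START_ITR >> 2 */
	unsigned int stored = usecs_to_itr_setting(usecs);

	printf("stored=%u, reported=%u\n",
	       stored, itr_setting_to_usecs(stored));
	return 0;
}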
@@ -1929,15 +1983,21 @@ static int igb_get_coalesce(struct net_device *netdev,
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
 
-	if (adapter->itr_setting <= 3)
-		ec->rx_coalesce_usecs = adapter->itr_setting;
+	if (adapter->rx_itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
 	else
-		ec->rx_coalesce_usecs = adapter->itr_setting >> 2;
+		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
+
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
+		if (adapter->tx_itr_setting <= 3)
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting;
+		else
+			ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
+	}
 
 	return 0;
 }
 
-
 static int igb_nway_reset(struct net_device *netdev)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1962,31 +2022,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
 				     struct ethtool_stats *stats, u64 *data)
 {
 	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct net_device_stats *net_stats = &netdev->stats;
 	u64 *queue_stat;
-	int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64);
-	int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64);
-	int j;
-	int i;
+	int i, j, k;
+	char *p;
 
 	igb_update_stats(adapter);
+
 	for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
-		char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset;
+		p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
 		data[i] = (igb_gstrings_stats[i].sizeof_stat ==
 			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
+	for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
+		p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
+		data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		int k;
-		queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
-		for (k = 0; k < stat_count_tx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
+		for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		int k;
-		queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
-		for (k = 0; k < stat_count_rx; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
+		for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
+			data[i] = queue_stat[k];
 	}
 }
 
@@ -2007,11 +2068,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+		for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
+			memcpy(p, igb_gstrings_net_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
 		for (i = 0; i < adapter->num_tx_queues; i++) {
 			sprintf(p, "tx_queue_%u_packets", i);
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "tx_queue_%u_bytes", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_restart", i);
+			p += ETH_GSTRING_LEN;
 		}
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			sprintf(p, "rx_queue_%u_packets", i);
@@ -2020,6 +2088,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 			sprintf(p, "rx_queue_%u_drops", i);
 			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_csum_err", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_alloc_failed", i);
+			p += ETH_GSTRING_LEN;
 		}
 /*		BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
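igb_get_strings and igb_get_ethtool_stats must emit names and values in the same order and the same IGB_STATS_LEN count; that invariant is what the commented-out BUG_ON above guards. A trivial sketch of keeping two such parallel tables honest (table contents are illustrative):

#include <assert.h>
#include <stdio.h>

static const char *demo_names[] = { "rx_packets", "tx_packets" };
static unsigned long long demo_values[] = { 42, 7 };

#define LEN(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	size_t i;

	/* the name table and value table must stay the same length */
	assert(LEN(demo_names) == LEN(demo_values));

	for (i = 0; i < LEN(demo_names); i++)
		printf("%s: %llu\n", demo_names[i], demo_values[i]);
	return 0;
}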
@@ -2037,7 +2109,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
 	.get_msglevel           = igb_get_msglevel,
 	.set_msglevel           = igb_set_msglevel,
 	.nway_reset             = igb_nway_reset,
-	.get_link               = ethtool_op_get_link,
+	.get_link               = igb_get_link,
 	.get_eeprom_len         = igb_get_eeprom_len,
 	.get_eeprom             = igb_get_eeprom,
 	.set_eeprom             = igb_set_eeprom,