Diffstat (limited to 'drivers/net/ixgbe/ixgbe_ethtool.c')
-rw-r--r--	drivers/net/ixgbe/ixgbe_ethtool.c | 1047
1 file changed, 580 insertions(+), 467 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index dcebc82c6f4d..cb1555bc8548 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2010 Intel Corporation.
+  Copyright(c) 1999 - 2011 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -84,6 +84,7 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
 	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
 	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
+	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
 	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
 	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
 	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
@@ -102,6 +103,10 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
 	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
 	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
 	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
+	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
+	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
+	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
+	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
 #ifdef IXGBE_FCOE
 	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
 	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
@@ -152,20 +157,35 @@ static int ixgbe_get_settings(struct net_device *netdev,
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
 				    SUPPORTED_Autoneg);
 
+		switch (hw->mac.type) {
+		case ixgbe_mac_X540:
+			ecmd->supported |= SUPPORTED_100baseT_Full;
+			break;
+		default:
+			break;
+		}
+
 		ecmd->advertising = ADVERTISED_Autoneg;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
-			ecmd->advertising |= ADVERTISED_10000baseT_Full;
-		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
-			ecmd->advertising |= ADVERTISED_1000baseT_Full;
-		/*
-		 * It's possible that phy.autoneg_advertised may not be
-		 * set yet.  If so display what the default would be -
-		 * both 1G and 10G supported.
-		 */
-		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
-					   ADVERTISED_10000baseT_Full)))
+		if (hw->phy.autoneg_advertised) {
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_100_FULL)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_10GB_FULL)
+				ecmd->advertising |= ADVERTISED_10000baseT_Full;
+			if (hw->phy.autoneg_advertised &
+			    IXGBE_LINK_SPEED_1GB_FULL)
+				ecmd->advertising |= ADVERTISED_1000baseT_Full;
+		} else {
+			/*
+			 * Default advertised modes in case
+			 * phy.autoneg_advertised isn't set.
+			 */
 			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
 					      ADVERTISED_1000baseT_Full);
+			if (hw->mac.type == ixgbe_mac_X540)
+				ecmd->advertising |= ADVERTISED_100baseT_Full;
+		}
 
 		if (hw->phy.media_type == ixgbe_media_type_copper) {
 			ecmd->supported |= SUPPORTED_TP;
@@ -185,6 +205,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
 				     ADVERTISED_FIBRE);
 		ecmd->port = PORT_FIBRE;
 		ecmd->autoneg = AUTONEG_DISABLE;
+	} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+		   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+		ecmd->supported |= (SUPPORTED_1000baseT_Full |
+				    SUPPORTED_Autoneg |
+				    SUPPORTED_FIBRE);
+		ecmd->advertising = (ADVERTISED_10000baseT_Full |
+				     ADVERTISED_1000baseT_Full |
+				     ADVERTISED_Autoneg |
+				     ADVERTISED_FIBRE);
+		ecmd->port = PORT_FIBRE;
 	} else {
 		ecmd->supported |= (SUPPORTED_1000baseT_Full |
 				    SUPPORTED_FIBRE);
@@ -204,6 +234,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
 	/* Get PHY type */
 	switch (adapter->hw.phy.type) {
 	case ixgbe_phy_tn:
+	case ixgbe_phy_aq:
 	case ixgbe_phy_cu_unknown:
 		/* Copper 10G-BASET */
 		ecmd->port = PORT_TP;
@@ -260,11 +291,22 @@ static int ixgbe_get_settings(struct net_device *netdev,
 
 	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
 	if (link_up) {
-		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
-			       SPEED_10000 : SPEED_1000;
+		switch (link_speed) {
+		case IXGBE_LINK_SPEED_10GB_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_10000);
+			break;
+		case IXGBE_LINK_SPEED_1GB_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_1000);
+			break;
+		case IXGBE_LINK_SPEED_100_FULL:
+			ethtool_cmd_speed_set(ecmd, SPEED_100);
+			break;
+		default:
+			break;
+		}
 		ecmd->duplex = DUPLEX_FULL;
 	} else {
-		ecmd->speed = -1;
+		ethtool_cmd_speed_set(ecmd, -1);
 		ecmd->duplex = -1;
 	}
 
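The hunk above stops writing ecmd->speed directly and goes through ethtool_cmd_speed_set(). struct ethtool_cmd of this era splits the link speed across two 16-bit fields for ABI reasons, so a plain assignment silently truncates anything above 65535 Mb/s. A rough sketch of what the accessor does, shown here only for illustration (it mirrors the include/linux/ethtool.h helper):

static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
{
	ep->speed = (__u16)speed;		/* low 16 bits */
	ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
}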
@@ -295,6 +337,9 @@ static int ixgbe_set_settings(struct net_device *netdev,
 		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
 			advertised |= IXGBE_LINK_SPEED_1GB_FULL;
 
+		if (ecmd->advertising & ADVERTISED_100baseT_Full)
+			advertised |= IXGBE_LINK_SPEED_100_FULL;
+
 		if (old == advertised)
 			return err;
 		/* this sets the link speed and restarts auto-neg */
@@ -306,9 +351,10 @@ static int ixgbe_set_settings(struct net_device *netdev,
 		}
 	} else {
 		/* in this case we currently only support 10Gb/FULL */
+		u32 speed = ethtool_cmd_speed(ecmd);
 		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
 		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
-		    (ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
+		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
 			return -EINVAL;
 	}
 
@@ -332,13 +378,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 	else
 		pause->autoneg = 1;
 
-#ifdef CONFIG_DCB
-	if (hw->fc.current_mode == ixgbe_fc_pfc) {
-		pause->rx_pause = 0;
-		pause->tx_pause = 0;
-	}
-
-#endif
 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 		pause->rx_pause = 1;
 	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +385,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
 	} else if (hw->fc.current_mode == ixgbe_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+	} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+		pause->rx_pause = 0;
+		pause->tx_pause = 0;
+#endif
 	}
 }
 
@@ -363,7 +407,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 		return -EINVAL;
 
 #endif
-
 	fc = hw->fc;
 
 	if (pause->autoneg != AUTONEG_ENABLE)
@@ -401,7 +444,7 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
 static u32 ixgbe_get_rx_csum(struct net_device *netdev)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
+	return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
 }
 
 static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
@@ -412,11 +455,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
 	else
 		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
-	if (netif_running(netdev))
-		ixgbe_reinit_locked(adapter);
-	else
-		ixgbe_reset(adapter);
-
 	return 0;
 }
 
@@ -428,16 +466,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 feature_list;
 
-	if (data) {
-		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			netdev->features |= NETIF_F_SCTP_CSUM;
-	} else {
-		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			netdev->features &= ~NETIF_F_SCTP_CSUM;
-	}
+	feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		feature_list |= NETIF_F_SCTP_CSUM;
+		break;
+	default:
+		break;
+	}
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
 
 	return 0;
 }
@@ -530,10 +573,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
 	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
 	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
 	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
-	for (i = 0; i < 8; i++)
-		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+	for (i = 0; i < 8; i++) {
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+			break;
+		case ixgbe_mac_82599EB:
+			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+			break;
+		default:
+			break;
+		}
+	}
 	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
 	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 
@@ -615,6 +668,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
 	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
+	/* DCB */
 	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
 	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -798,11 +852,8 @@ static int ixgbe_get_eeprom(struct net_device *netdev,
 	if (!eeprom_buff)
 		return -ENOMEM;
 
-	for (i = 0; i < eeprom_len; i++) {
-		if ((ret_val = hw->eeprom.ops.read(hw, first_word + i,
-		    &eeprom_buff[i])))
-			break;
-	}
+	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
+					     eeprom_buff);
 
 	/* Device's eeprom is always little-endian, word addressable */
 	for (i = 0; i < eeprom_len; i++)
@@ -820,16 +871,20 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver, ixgbe_driver_name, 32);
-	strncpy(drvinfo->version, ixgbe_driver_version, 32);
+	strncpy(drvinfo->driver, ixgbe_driver_name,
+		sizeof(drvinfo->driver) - 1);
+	strncpy(drvinfo->version, ixgbe_driver_version,
+		sizeof(drvinfo->version) - 1);
 
-	sprintf(firmware_version, "%d.%d-%d",
+	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 		(adapter->eeprom_version & 0xF000) >> 12,
 		(adapter->eeprom_version & 0x0FF0) >> 4,
 		adapter->eeprom_version & 0x000F);
 
-	strncpy(drvinfo->fw_version, firmware_version, 32);
-	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	strncpy(drvinfo->fw_version, firmware_version,
+		sizeof(drvinfo->fw_version));
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
+		sizeof(drvinfo->bus_info));
 	drvinfo->n_stats = IXGBE_STATS_LEN;
 	drvinfo->testinfo_len = IXGBE_TEST_LEN;
 	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
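Two small hardening moves here: the copy bounds now come from sizeof() on the destination field rather than a hard-coded 32, and the version string is built with snprintf() so it can never overrun its buffer. A minimal userspace-style sketch of the same pattern (not driver code):

#include <stdio.h>
#include <string.h>

struct info { char driver[32]; char fw_version[32]; };

static void fill_info(struct info *inf, const char *name, int maj, int min)
{
	/* the bound tracks the struct; strncpy may omit the terminator, add one */
	strncpy(inf->driver, name, sizeof(inf->driver) - 1);
	inf->driver[sizeof(inf->driver) - 1] = '\0';
	/* snprintf always terminates and never overruns */
	snprintf(inf->fw_version, sizeof(inf->fw_version), "%d.%d", maj, min);
}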
@@ -879,7 +934,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 	}
 
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
-		msleep(1);
+		usleep_range(1000, 2000);
 
 	if (!netif_running(adapter->netdev)) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
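This msleep() to usleep_range() conversion repeats throughout the patch. msleep() has jiffy granularity, so msleep(1) can sleep on the order of 20 ms at HZ=100; usleep_range() is hrtimer based and gives the kernel a window in which to coalesce wakeups with other timers. The rule of thumb, assuming non-atomic context:

	/* before: asks for 1 ms but may sleep ~2 jiffies (up to ~20 ms at HZ=100) */
	msleep(1);
	/* after: wakes somewhere between 1 and 2 ms, batched with nearby timers */
	usleep_range(1000, 2000);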
@@ -902,13 +957,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter,
-						       &temp_tx_ring[i]);
+			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(adapter,
-								&temp_tx_ring[i]);
+					ixgbe_free_tx_resources(&temp_tx_ring[i]);
 				}
 				goto clear_reset;
 			}
@@ -927,13 +980,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter,
-						       &temp_rx_ring[i]);
+			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_rx_resources(adapter,
-								&temp_rx_ring[i]);
+					ixgbe_free_rx_resources(&temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
@@ -948,8 +999,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
 			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter,
-							adapter->tx_ring[i]);
+				ixgbe_free_tx_resources(adapter->tx_ring[i]);
 				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -959,8 +1009,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
 			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter,
-							adapter->rx_ring[i]);
+				ixgbe_free_rx_resources(adapter->rx_ring[i]);
 				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -984,9 +1033,6 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 		return IXGBE_TEST_LEN;
 	case ETH_SS_STATS:
 		return IXGBE_STATS_LEN;
-	case ETH_SS_NTUPLE_FILTERS:
-		return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
-			ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -996,12 +1042,11 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 				    struct ethtool_stats *stats, u64 *data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
-	u64 *queue_stat;
-	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *net_stats;
-	int j, k;
-	int i;
+	unsigned int start;
+	struct ixgbe_ring *ring;
+	int i, j;
 	char *p = NULL;
 
 	ixgbe_update_stats(adapter);
@@ -1022,16 +1067,22 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
 			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
 	}
 	for (j = 0; j < adapter->num_tx_queues; j++) {
-		queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
-		for (k = 0; k < stat_count; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		ring = adapter->tx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i+1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		i += 2;
 	}
 	for (j = 0; j < adapter->num_rx_queues; j++) {
-		queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
-		for (k = 0; k < stat_count; k++)
-			data[i + k] = queue_stat[k];
-		i += k;
+		ring = adapter->rx_ring[j];
+		do {
+			start = u64_stats_fetch_begin_bh(&ring->syncp);
+			data[i] = ring->stats.packets;
+			data[i+1] = ring->stats.bytes;
+		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+		i += 2;
 	}
 	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 		for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
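The fetch/retry loop reads a consistent packets/bytes pair even on 32-bit SMP, where a 64-bit counter update can tear across two stores. For completeness, a sketch of the writer side this pairs with, assuming ring->syncp is the struct u64_stats_sync guarding the counters (which is what the hunk implies):

	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets++;
	ring->stats.bytes += len;
	u64_stats_update_end(&ring->syncp);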
@@ -1136,7 +1187,7 @@ struct ixgbe_reg_test {
 #define TABLE64_TEST_HI	6
 
 /* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
+static const struct ixgbe_reg_test reg_test_82599[] = {
 	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1160,7 +1211,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
 };
 
 /* default 82598 register test */
-static struct ixgbe_reg_test reg_test_82598[] = {
+static const struct ixgbe_reg_test reg_test_82598[] = {
 	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1187,54 +1238,82 @@ static struct ixgbe_reg_test reg_test_82598[] = {
 	{ 0, 0, 0, 0 }
 };
 
-#define REG_PATTERN_TEST(R, M, W) \
-{ \
-	u32 pat, val, before; \
-	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
-	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
-		before = readl(adapter->hw.hw_addr + R); \
-		writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
-		val = readl(adapter->hw.hw_addr + R); \
-		if (val != (_test[pat] & W & M)) { \
-			e_err(drv, "pattern test reg %04X failed: got " \
-			      "0x%08X expected 0x%08X\n", \
-			      R, val, (_test[pat] & W & M)); \
-			*data = R; \
-			writel(before, adapter->hw.hw_addr + R); \
-			return 1; \
-		} \
-		writel(before, adapter->hw.hw_addr + R); \
-	} \
+static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
+			     u32 mask, u32 write)
+{
+	u32 pat, val, before;
+	static const u32 test_pattern[] = {
+		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+
+	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
+		before = readl(adapter->hw.hw_addr + reg);
+		writel((test_pattern[pat] & write),
+		       (adapter->hw.hw_addr + reg));
+		val = readl(adapter->hw.hw_addr + reg);
+		if (val != (test_pattern[pat] & write & mask)) {
+			e_err(drv, "pattern test reg %04X failed: got "
+			      "0x%08X expected 0x%08X\n",
+			      reg, val, (test_pattern[pat] & write & mask));
+			*data = reg;
+			writel(before, adapter->hw.hw_addr + reg);
+			return 1;
+		}
+		writel(before, adapter->hw.hw_addr + reg);
+	}
+	return 0;
 }
 
-#define REG_SET_AND_CHECK(R, M, W) \
-{ \
-	u32 val, before; \
-	before = readl(adapter->hw.hw_addr + R); \
-	writel((W & M), (adapter->hw.hw_addr + R)); \
-	val = readl(adapter->hw.hw_addr + R); \
-	if ((W & M) != (val & M)) { \
-		e_err(drv, "set/check reg %04X test failed: got 0x%08X " \
-		      "expected 0x%08X\n", R, (val & M), (W & M)); \
-		*data = R; \
-		writel(before, (adapter->hw.hw_addr + R)); \
-		return 1; \
-	} \
-	writel(before, (adapter->hw.hw_addr + R)); \
+static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
+			      u32 mask, u32 write)
+{
+	u32 val, before;
+	before = readl(adapter->hw.hw_addr + reg);
+	writel((write & mask), (adapter->hw.hw_addr + reg));
+	val = readl(adapter->hw.hw_addr + reg);
+	if ((write & mask) != (val & mask)) {
+		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
+		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		*data = reg;
+		writel(before, (adapter->hw.hw_addr + reg));
+		return 1;
+	}
+	writel(before, (adapter->hw.hw_addr + reg));
+	return 0;
 }
 
+#define REG_PATTERN_TEST(reg, mask, write)			\
+	do {							\
+		if (reg_pattern_test(adapter, data, reg, mask, write)) \
+			return 1;				\
+	} while (0)						\
+
+
+#define REG_SET_AND_CHECK(reg, mask, write)			\
+	do {							\
+		if (reg_set_and_check(adapter, data, reg, mask, write)) \
+			return 1;				\
+	} while (0)						\
+
 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 {
-	struct ixgbe_reg_test *test;
+	const struct ixgbe_reg_test *test;
 	u32 value, before, after;
 	u32 i, toggle;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		toggle = 0x7FFFF30F;
-		test = reg_test_82599;
-	} else {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		toggle = 0x7FFFF3FF;
 		test = reg_test_82598;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		toggle = 0x7FFFF30F;
+		test = reg_test_82599;
+		break;
+	default:
+		*data = 1;
+		return 1;
+		break;
 	}
 
 	/*
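Turning the register-test macros into real functions shrinks the generated code, while the thin do { ... } while (0) wrappers preserve the old macros' ability to return from the calling function and still behave as a single statement. That single-statement property is the whole point, as this standalone sketch shows:

#define CHECK_OR_FAIL(cond)			\
	do {					\
		if (!(cond))			\
			return 1;		\
	} while (0)

static int example_run(int a, int b)
{
	if (a > 0)
		CHECK_OR_FAIL(a < b);	/* expands to exactly one statement */
	else
		CHECK_OR_FAIL(b > 0);	/* so the else still binds to the if */
	return 0;
}

With bare braces instead of do/while(0), the semicolon after the macro call would terminate the if and the else branch would no longer compile.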
@@ -1265,13 +1344,13 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 		switch (test->test_type) {
 		case PATTERN_TEST:
 			REG_PATTERN_TEST(test->reg + (i * 0x40),
-			                 test->mask,
-			                 test->write);
+					 test->mask,
+					 test->write);
 			break;
 		case SET_READ_TEST:
 			REG_SET_AND_CHECK(test->reg + (i * 0x40),
-			                  test->mask,
-			                  test->write);
+					  test->mask,
+					  test->write);
 			break;
 		case WRITE_NO_TEST:
 			writel(test->write,
@@ -1280,18 +1359,18 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 			break;
 		case TABLE32_TEST:
 			REG_PATTERN_TEST(test->reg + (i * 4),
-			                 test->mask,
-			                 test->write);
+					 test->mask,
+					 test->write);
 			break;
 		case TABLE64_TEST_LO:
 			REG_PATTERN_TEST(test->reg + (i * 8),
-			                 test->mask,
-			                 test->write);
+					 test->mask,
+					 test->write);
 			break;
 		case TABLE64_TEST_HI:
 			REG_PATTERN_TEST((test->reg + 4) + (i * 8),
-			                 test->mask,
-			                 test->write);
+					 test->mask,
+					 test->write);
 			break;
 		}
 	}
@@ -1354,7 +1433,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 
 	/* Disable all the interrupts */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
-	msleep(10);
+	usleep_range(10000, 20000);
 
 	/* Test each interrupt */
 	for (; i < 10; i++) {
@@ -1374,7 +1453,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 					~mask & 0x00007FFF);
 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
 					~mask & 0x00007FFF);
-			msleep(10);
+			usleep_range(10000, 20000);
 
 			if (adapter->test_icr & mask) {
 				*data = 3;
@@ -1391,7 +1470,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 		adapter->test_icr = 0;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-		msleep(10);
+		usleep_range(10000, 20000);
 
 		if (!(adapter->test_icr &mask)) {
 			*data = 4;
@@ -1411,7 +1490,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 					~mask & 0x00007FFF);
 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
 					~mask & 0x00007FFF);
-			msleep(10);
+			usleep_range(10000, 20000);
 
 			if (adapter->test_icr) {
 				*data = 5;
@@ -1422,7 +1501,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
 
 	/* Disable all the interrupts */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
-	msleep(10);
+	usleep_range(10000, 20000);
 
 	/* Unhook test interrupt handler */
 	free_irq(irq, netdev);
@@ -1435,9 +1514,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
 	struct ixgbe_hw *hw = &adapter->hw;
-	struct pci_dev *pdev = adapter->pdev;
 	u32 reg_ctl;
-	int i;
 
 	/* shut down the DMA engines now so they can be reinitialized later */
 
@@ -1445,237 +1522,86 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
 	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
 	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
 	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(0));
-	reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(0), reg_ctl);
+	ixgbe_disable_rx_queue(adapter, rx_ring);
 
 	/* now Tx */
-	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(0));
+	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
 	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
-	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(0), reg_ctl);
-	if (hw->mac.type == ixgbe_mac_82599EB) {
+	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
+
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		reg_ctl &= ~IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+		break;
+	default:
+		break;
 	}
 
 	ixgbe_reset(adapter);
 
-	if (tx_ring->desc && tx_ring->tx_buffer_info) {
-		for (i = 0; i < tx_ring->count; i++) {
-			struct ixgbe_tx_buffer *buf =
-					&(tx_ring->tx_buffer_info[i]);
-			if (buf->dma)
-				dma_unmap_single(&pdev->dev, buf->dma,
-						 buf->length, DMA_TO_DEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (rx_ring->desc && rx_ring->rx_buffer_info) {
-		for (i = 0; i < rx_ring->count; i++) {
-			struct ixgbe_rx_buffer *buf =
-					&(rx_ring->rx_buffer_info[i]);
-			if (buf->dma)
-				dma_unmap_single(&pdev->dev, buf->dma,
-						 IXGBE_RXBUFFER_2048,
-						 DMA_FROM_DEVICE);
-			if (buf->skb)
-				dev_kfree_skb(buf->skb);
-		}
-	}
-
-	if (tx_ring->desc) {
-		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-				  tx_ring->dma);
-		tx_ring->desc = NULL;
-	}
-	if (rx_ring->desc) {
-		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-				  rx_ring->dma);
-		rx_ring->desc = NULL;
-	}
-
-	kfree(tx_ring->tx_buffer_info);
-	tx_ring->tx_buffer_info = NULL;
-	kfree(rx_ring->rx_buffer_info);
-	rx_ring->rx_buffer_info = NULL;
+	ixgbe_free_tx_resources(&adapter->test_tx_ring);
+	ixgbe_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
 	u32 rctl, reg_data;
-	int i, ret_val;
+	int ret_val;
+	int err;
 
 	/* Setup Tx descriptor ring and Tx buffers */
+	tx_ring->count = IXGBE_DEFAULT_TXD;
+	tx_ring->queue_index = 0;
+	tx_ring->dev = &adapter->pdev->dev;
+	tx_ring->netdev = adapter->netdev;
+	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
+	tx_ring->numa_node = adapter->node;
+
+	err = ixgbe_setup_tx_resources(tx_ring);
+	if (err)
+		return 1;
 
-	if (!tx_ring->count)
-		tx_ring->count = IXGBE_DEFAULT_TXD;
-
-	tx_ring->tx_buffer_info = kcalloc(tx_ring->count,
-					  sizeof(struct ixgbe_tx_buffer),
-					  GFP_KERNEL);
-	if (!(tx_ring->tx_buffer_info)) {
-		ret_val = 1;
-		goto err_nomem;
-	}
-
-	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
-	tx_ring->size = ALIGN(tx_ring->size, 4096);
-	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
-					   &tx_ring->dma, GFP_KERNEL);
-	if (!(tx_ring->desc)) {
-		ret_val = 2;
-		goto err_nomem;
-	}
-	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
-
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAL(0),
-			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDBAH(0),
-			((u64) tx_ring->dma >> 32));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDLEN(0),
-			tx_ring->count * sizeof(union ixgbe_adv_tx_desc));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDH(0), 0);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), 0);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
-	reg_data |= IXGBE_HLREG0_TXPADEN;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
 		reg_data |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+		break;
+	default:
+		break;
 	}
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_TXDCTL(0));
-	reg_data |= IXGBE_TXDCTL_ENABLE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_TXDCTL(0), reg_data);
-
-	for (i = 0; i < tx_ring->count; i++) {
-		union ixgbe_adv_tx_desc *desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
-		struct sk_buff *skb;
-		unsigned int size = 1024;
-
-		skb = alloc_skb(size, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 3;
-			goto err_nomem;
-		}
-		skb_put(skb, size);
-		tx_ring->tx_buffer_info[i].skb = skb;
-		tx_ring->tx_buffer_info[i].length = skb->len;
-		tx_ring->tx_buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data, skb->len,
-				       DMA_TO_DEVICE);
-		desc->read.buffer_addr =
-			cpu_to_le64(tx_ring->tx_buffer_info[i].dma);
-		desc->read.cmd_type_len = cpu_to_le32(skb->len);
-		desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD_EOP |
-						       IXGBE_TXD_CMD_IFCS |
-						       IXGBE_TXD_CMD_RS);
-		desc->read.olinfo_status = 0;
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			desc->read.olinfo_status |=
-				(skb->len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
-	}
+	ixgbe_configure_tx_ring(adapter, tx_ring);
 
 	/* Setup Rx Descriptor ring and Rx buffers */
-
-	if (!rx_ring->count)
-		rx_ring->count = IXGBE_DEFAULT_RXD;
-
-	rx_ring->rx_buffer_info = kcalloc(rx_ring->count,
-					  sizeof(struct ixgbe_rx_buffer),
-					  GFP_KERNEL);
-	if (!(rx_ring->rx_buffer_info)) {
+	rx_ring->count = IXGBE_DEFAULT_RXD;
+	rx_ring->queue_index = 0;
+	rx_ring->dev = &adapter->pdev->dev;
+	rx_ring->netdev = adapter->netdev;
+	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
+	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
+	rx_ring->numa_node = adapter->node;
+
+	err = ixgbe_setup_rx_resources(rx_ring);
+	if (err) {
 		ret_val = 4;
 		goto err_nomem;
 	}
 
-	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
-	rx_ring->size = ALIGN(rx_ring->size, 4096);
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
-					   &rx_ring->dma, GFP_KERNEL);
-	if (!(rx_ring->desc)) {
-		ret_val = 5;
-		goto err_nomem;
-	}
-	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
-
 	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAL(0),
-			((u64)rx_ring->dma & 0xFFFFFFFF));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDBAH(0),
-			((u64) rx_ring->dma >> 32));
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDLEN(0), rx_ring->size);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDH(0), 0);
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), 0);
 
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
-	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
-	reg_data &= ~IXGBE_HLREG0_LPBK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RDRXCTL);
-#define IXGBE_RDRXCTL_RDMTS_MASK	0x00000003 /* Receive Descriptor Minimum
-						      Threshold Size mask */
-	reg_data &= ~IXGBE_RDRXCTL_RDMTS_MASK;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDRXCTL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MCSTCTRL);
-#define IXGBE_MCSTCTRL_MO_MASK	0x00000003 /* Multicast Offset mask */
-	reg_data &= ~IXGBE_MCSTCTRL_MO_MASK;
-	reg_data |= adapter->hw.mac.mc_filter_type;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_MCSTCTRL, reg_data);
-
-	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(0));
-	reg_data |= IXGBE_RXDCTL_ENABLE;
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		int j = adapter->rx_ring[0]->reg_idx;
-		u32 k;
-		for (k = 0; k < 10; k++) {
-			if (IXGBE_READ_REG(&adapter->hw,
-					   IXGBE_RXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
-				break;
-			else
-				msleep(1);
-		}
-	}
+	ixgbe_configure_rx_ring(adapter, rx_ring);
 
 	rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
 
-	for (i = 0; i < rx_ring->count; i++) {
-		union ixgbe_adv_rx_desc *rx_desc =
-			IXGBE_RX_DESC_ADV(*rx_ring, i);
-		struct sk_buff *skb;
-
-		skb = alloc_skb(IXGBE_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
-		if (!skb) {
-			ret_val = 6;
-			goto err_nomem;
-		}
-		skb_reserve(skb, NET_IP_ALIGN);
-		rx_ring->rx_buffer_info[i].skb = skb;
-		rx_ring->rx_buffer_info[i].dma =
-			dma_map_single(&pdev->dev, skb->data,
-				       IXGBE_RXBUFFER_2048, DMA_FROM_DEVICE);
-		rx_desc->read.pkt_addr =
-			cpu_to_le64(rx_ring->rx_buffer_info[i].dma);
-		memset(skb->data, 0x00, skb->len);
-	}
-
 	return 0;
 
 err_nomem:
@@ -1688,17 +1614,29 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 reg_data;
 
-	/* right now we only support MAC loopback in the driver */
+	/* X540 needs to set the MACC.FLU bit to force link up */
+	if (adapter->hw.mac.type == ixgbe_mac_X540) {
+		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_MACC);
+		reg_data |= IXGBE_MACC_FLU;
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_MACC, reg_data);
+	}
 
-	/* Setup MAC loopback */
+	/* right now we only support MAC loopback in the driver */
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
+	/* Setup MAC loopback */
 	reg_data |= IXGBE_HLREG0_LPBK;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
 
+	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL);
+	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCTRL, reg_data);
+
 	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_AUTOC);
 	reg_data &= ~IXGBE_AUTOC_LMS_MASK;
 	reg_data |= IXGBE_AUTOC_LMS_10G_LINK_NO_AN | IXGBE_AUTOC_FLU;
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_AUTOC, reg_data);
+	IXGBE_WRITE_FLUSH(&adapter->hw);
+	usleep_range(10000, 20000);
 
 	/* Disable Atlas Tx lanes; re-enabled in reset path */
 	if (hw->mac.type == ixgbe_mac_82598EB) {
@@ -1756,15 +1694,80 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
 	return 13;
 }
 
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
+				  struct ixgbe_ring *tx_ring,
+				  unsigned int size)
+{
+	union ixgbe_adv_rx_desc *rx_desc;
+	struct ixgbe_rx_buffer *rx_buffer_info;
+	struct ixgbe_tx_buffer *tx_buffer_info;
+	const int bufsz = rx_ring->rx_buf_len;
+	u32 staterr;
+	u16 rx_ntc, tx_ntc, count = 0;
+
+	/* initialize next to clean and descriptor values */
+	rx_ntc = rx_ring->next_to_clean;
+	tx_ntc = tx_ring->next_to_clean;
+	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+	while (staterr & IXGBE_RXD_STAT_DD) {
+		/* check Rx buffer */
+		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
+
+		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
+		dma_unmap_single(rx_ring->dev,
+				 rx_buffer_info->dma,
+				 bufsz,
+				 DMA_FROM_DEVICE);
+		rx_buffer_info->dma = 0;
+
+		/* verify contents of skb */
+		if (!ixgbe_check_lbtest_frame(rx_buffer_info->skb, size))
+			count++;
+
+		/* unmap buffer on Tx side */
+		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
+
+		/* increment Rx/Tx next to clean counters */
+		rx_ntc++;
+		if (rx_ntc == rx_ring->count)
+			rx_ntc = 0;
+		tx_ntc++;
+		if (tx_ntc == tx_ring->count)
+			tx_ntc = 0;
+
+		/* fetch next descriptor */
+		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, rx_ntc);
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+
+	/* re-map buffers to ring, store next to clean values */
+	ixgbe_alloc_rx_buffers(rx_ring, count);
+	rx_ring->next_to_clean = rx_ntc;
+	tx_ring->next_to_clean = tx_ntc;
+
+	return count;
+}
+
 static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
 	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
-	struct pci_dev *pdev = adapter->pdev;
-	int i, j, k, l, lc, good_cnt, ret_val = 0;
-	unsigned long time;
+	int i, j, lc, good_cnt, ret_val = 0;
+	unsigned int size = 1024;
+	netdev_tx_t tx_ret_val;
+	struct sk_buff *skb;
+
+	/* allocate test skb */
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return 11;
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RDT(0), rx_ring->count - 1);
+	/* place data into test skb */
+	ixgbe_create_lbtest_frame(skb, size);
+	skb_put(skb, size);
 
 	/*
 	 * Calculate the loop count based on the largest descriptor ring
@@ -1777,54 +1780,38 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
 	else
 		lc = ((rx_ring->count / 64) * 2) + 1;
 
-	k = l = 0;
 	for (j = 0; j <= lc; j++) {
-		for (i = 0; i < 64; i++) {
-			ixgbe_create_lbtest_frame(
-					tx_ring->tx_buffer_info[k].skb,
-					1024);
-			dma_sync_single_for_device(&pdev->dev,
-					tx_ring->tx_buffer_info[k].dma,
-					tx_ring->tx_buffer_info[k].length,
-					DMA_TO_DEVICE);
-			if (unlikely(++k == tx_ring->count))
-				k = 0;
-		}
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_TDT(0), k);
-		msleep(200);
-		/* set the start time for the receive */
-		time = jiffies;
+		/* reset count of good packets */
 		good_cnt = 0;
-		do {
-			/* receive the sent packets */
-			dma_sync_single_for_cpu(&pdev->dev,
-					rx_ring->rx_buffer_info[l].dma,
-					IXGBE_RXBUFFER_2048,
-					DMA_FROM_DEVICE);
-			ret_val = ixgbe_check_lbtest_frame(
-					rx_ring->rx_buffer_info[l].skb, 1024);
-			if (!ret_val)
+
+		/* place 64 packets on the transmit queue*/
+		for (i = 0; i < 64; i++) {
+			skb_get(skb);
+			tx_ret_val = ixgbe_xmit_frame_ring(skb,
+							   adapter,
+							   tx_ring);
+			if (tx_ret_val == NETDEV_TX_OK)
 				good_cnt++;
-			if (++l == rx_ring->count)
-				l = 0;
-			/*
-			 * time + 20 msecs (200 msecs on 2.4) is more than
-			 * enough time to complete the receives, if it's
-			 * exceeded, break and error off
-			 */
-		} while (good_cnt < 64 && jiffies < (time + 20));
+		}
+
 		if (good_cnt != 64) {
-			/* ret_val is the same as mis-compare */
-			ret_val = 13;
+			ret_val = 12;
 			break;
 		}
-		if (jiffies >= (time + 20)) {
-			/* Error code for time out error */
-			ret_val = 14;
+
+		/* allow 200 milliseconds for packets to go from Tx to Rx */
+		msleep(200);
+
+		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
+		if (good_cnt != 64) {
+			ret_val = 13;
 			break;
 		}
 	}
 
+	/* free the original skb */
+	kfree_skb(skb);
+
 	return ret_val;
 }
 
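The rewritten loopback test sends one skb 64 times by taking an extra reference with skb_get() before each transmit, so whatever consumes the skb on the Tx completion path only drops a reference rather than freeing the buffer out from under the test. A hedged sketch of the idiom, where xmit() is a stand-in and not a real function:

	for (i = 0; i < 64; i++) {
		skb_get(skb);			/* +1 reference for this send */
		if (xmit(skb) != NETDEV_TX_OK)
			kfree_skb(skb);		/* put back the reference ourselves */
	}
	kfree_skb(skb);				/* finally drop the test's own reference */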
@@ -1947,7 +1934,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
 	struct ixgbe_hw *hw = &adapter->hw;
 	int retval = 1;
 
+	/* WOL not supported except for the following */
 	switch(hw->device_id) {
+	case IXGBE_DEV_ID_82599_SFP:
+		/* Only this subdevice supports WOL */
+		if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+			wol->supported = 0;
+			break;
+		}
+		retval = 0;
+		break;
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+		/* All except this subdevice support WOL */
+		if (hw->subsystem_device_id ==
+		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+			wol->supported = 0;
+			break;
+		}
+		retval = 0;
+		break;
 	case IXGBE_DEV_ID_82599_KX4:
 		retval = 0;
 		break;
@@ -2017,25 +2022,30 @@ static int ixgbe_nway_reset(struct net_device *netdev)
 	return 0;
 }
 
-static int ixgbe_phys_id(struct net_device *netdev, u32 data)
+static int ixgbe_set_phys_id(struct net_device *netdev,
+			     enum ethtool_phys_id_state state)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	struct ixgbe_hw *hw = &adapter->hw;
-	u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-	u32 i;
 
-	if (!data || data > 300)
-		data = 300;
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+		return 2;
 
-	for (i = 0; i < (data * 1000); i += 400) {
+	case ETHTOOL_ID_ON:
 		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
-		msleep_interruptible(200);
+		break;
+
+	case ETHTOOL_ID_OFF:
 		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
-		msleep_interruptible(200);
-	}
+		break;
 
-	/* Restore LED settings */
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, led_reg);
+	case ETHTOOL_ID_INACTIVE:
+		/* Restore LED settings */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
+		break;
+	}
 
 	return 0;
 }
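The old ixgbe_phys_id() busy-looped with msleep_interruptible() for the whole blink period. With set_phys_id() the ethtool core owns the timing and the driver only reacts to state callbacks; the positive value returned for ETHTOOL_ID_ACTIVE tells the core how often to alternate the ON/OFF callbacks (see the ethtool_ops documentation for the exact contract). A minimal sketch for a hypothetical driver, with made-up LED helpers:

static int example_set_phys_id(struct net_device *netdev,
			       enum ethtool_phys_id_state state)
{
	struct example_priv *priv = netdev_priv(netdev);

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		priv->saved_led = example_read_led(priv);	/* save state */
		return 2;		/* hand the blink timing to the core */
	case ETHTOOL_ID_ON:
		example_led_on(priv);
		break;
	case ETHTOOL_ID_OFF:
		example_led_off(priv);
		break;
	case ETHTOOL_ID_INACTIVE:
		example_write_led(priv, priv->saved_led);	/* restore */
		break;
	}
	return 0;
}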
@@ -2085,6 +2095,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 	return 0;
 }
 
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+			     struct ethtool_coalesce *ec)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+		return false;
+
+	/* if interrupt rate is too high then disable RSC */
+	if (ec->rx_coalesce_usecs != 1 &&
+	    ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			e_info(probe, "rx-usecs set too low, "
+				      "disabling RSC\n");
+			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+			return true;
+		}
+	} else {
+		/* check the feature flag value and enable RSC if necessary */
+		if ((netdev->features & NETIF_F_LRO) &&
+		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+			e_info(probe, "rx-usecs set to %d, "
+				      "re-enabling RSC\n",
+			       ec->rx_coalesce_usecs);
+			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+			return true;
+		}
+	}
+	return false;
+}
+
 static int ixgbe_set_coalesce(struct net_device *netdev,
 			      struct ethtool_coalesce *ec)
 {
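ixgbe_update_rsc() compares ec->rx_coalesce_usecs against 1000000/IXGBE_MAX_RSC_INT_RATE because the ethtool value is microseconds per interrupt while the driver limit is interrupts per second; the two are reciprocals. A tiny sketch of that arithmetic with invented limits:

#define EX_MIN_INT_RATE    956		/* hypothetical floor, IRQs/sec */
#define EX_MAX_INT_RATE 488281		/* hypothetical ceiling, IRQs/sec */

static bool ex_rx_usecs_valid(unsigned int usecs)
{
	unsigned int rate;

	if (usecs <= 1)			/* 0 and 1 are special mode selectors */
		return true;
	rate = 1000000 / usecs;		/* usecs per IRQ -> IRQs per second */
	return rate >= EX_MIN_INT_RATE && rate <= EX_MAX_INT_RATE;
}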
@@ -2102,17 +2147,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
-		u32 max_int;
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-			max_int = IXGBE_MAX_RSC_INT_RATE;
-		else
-			max_int = IXGBE_MAX_INT_RATE;
-
 		/* check the limits */
-		if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
 		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
 			return -EINVAL;
 
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
+
 		/* store the value in ints/second */
 		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
@@ -2121,32 +2163,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
 		/* clear the lower bit as its used for dynamic state */
 		adapter->rx_itr_setting &= ~1;
 	} else if (ec->rx_coalesce_usecs == 1) {
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
+
 		/* 1 means dynamic mode */
 		adapter->rx_eitr_param = 20000;
 		adapter->rx_itr_setting = 1;
 	} else {
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
 		/*
 		 * any other value means disable eitr, which is best
 		 * served by setting the interrupt rate very high
 		 */
 		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
 		adapter->rx_itr_setting = 0;
-
-		/*
-		 * if hardware RSC is enabled, disable it when
-		 * setting low latency mode, to avoid errata, assuming
-		 * that when the user set low latency mode they want
-		 * it at the cost of anything else
-		 */
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			if (netdev->features & NETIF_F_LRO) {
-				netdev->features &= ~NETIF_F_LRO;
-				e_info(probe, "rx-usecs set to 0, "
-				       "disabling RSC\n");
-			}
-			need_reset = true;
-		}
 	}
 
 	if (ec->tx_coalesce_usecs > 1) {
@@ -2218,33 +2249,59 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
 	bool need_reset = false;
 	int rc;
 
-	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+#ifdef CONFIG_IXGBE_DCB
+	if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) &&
+	    !(data & ETH_FLAG_RXVLAN))
+		return -EINVAL;
+#endif
+
+	need_reset = (data & ETH_FLAG_RXVLAN) !=
+		     (netdev->features & NETIF_F_HW_VLAN_RX);
+
+	if ((data & ETH_FLAG_RXHASH) &&
+	    !(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		return -EOPNOTSUPP;
+
+	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
+				  ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN |
+				  ETH_FLAG_RXHASH);
 	if (rc)
 		return rc;
 
 	/* if state changes we need to update adapter->flags and reset */
-	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-		/*
-		 * cast both to bool and verify if they are set the same
-		 * but only enable RSC if itr is non-zero, as
-		 * itr=0 and RSC are mutually exclusive
-		 */
-		if (((!!(data & ETH_FLAG_LRO)) !=
-		     (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
-		    adapter->rx_itr_setting) {
+	if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+	    (!!(data & ETH_FLAG_LRO) !=
+	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+		if ((data & ETH_FLAG_LRO) &&
+		    (!adapter->rx_itr_setting ||
+		     (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+			e_info(probe, "rx-usecs set too low, "
+				      "not enabling RSC.\n");
+		} else {
 			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
 			switch (adapter->hw.mac.type) {
 			case ixgbe_mac_82599EB:
 				need_reset = true;
 				break;
+			case ixgbe_mac_X540: {
+				int i;
+				for (i = 0; i < adapter->num_rx_queues; i++) {
+					struct ixgbe_ring *ring =
+							adapter->rx_ring[i];
+					if (adapter->flags2 &
+					    IXGBE_FLAG2_RSC_ENABLED) {
+						ixgbe_configure_rscctl(adapter,
+								       ring);
+					} else {
+						ixgbe_clear_rscctl(adapter,
+								   ring);
+					}
+				}
+			}
+				break;
 			default:
 				break;
 			}
-		} else if (!adapter->rx_itr_setting) {
-			netdev->features &= ~NETIF_F_LRO;
-			if (data & ETH_FLAG_LRO)
-				e_info(probe, "rx-usecs set to 0, "
-				       "LRO/RSC cannot be enabled.\n");
 		}
 	}
 
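Note: the rewritten condition compares one bit of data against a differently
positioned bit of adapter->flags2; the double negation collapses each to 0/1
so unequal bit positions still compare correctly. A minimal illustration
(the bit positions below are made up):

#include <assert.h>

#define FLAG_LRO	(1u << 15)	/* made-up bit positions */
#define FLAG2_RSC_EN	(1u << 3)

static int lro_request_differs(unsigned int data, unsigned int flags2)
{
	/* without !!, (1u << 15) would never equal (1u << 3) */
	return !!(data & FLAG_LRO) != !!(flags2 & FLAG2_RSC_EN);
}

int main(void)
{
	assert(!lro_request_differs(FLAG_LRO, FLAG2_RSC_EN)); /* both set */
	assert(lro_request_differs(0, FLAG2_RSC_EN));         /* turning off */
	return 0;
}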
@@ -2282,10 +2339,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
 				  struct ethtool_rx_ntuple *cmd)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
-	struct ixgbe_atr_input input_struct;
+	struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
+	union ixgbe_atr_input input_struct;
 	struct ixgbe_atr_input_masks input_masks;
 	int target_queue;
+	int err;
 
 	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
 		return -EOPNOTSUPP;
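Note: ixgbe_atr_input becomes a union here so the same bytes can be read
either as named header fields or as a flat dword stream (e.g. for signature
hashing). The general shape of that pattern, with a simplified layout that
is only an assumption, not the driver's actual definition:

#include <stdint.h>

union atr_input {
	struct {
		uint8_t  flow_type;
		uint8_t  pad[3];
		uint32_t src_ip;
		uint32_t dst_ip;
		uint16_t src_port;
		uint16_t dst_port;
	} formatted;			/* per-field view for filter setup */
	uint32_t dword_stream[4];	/* flat view for hashing/registers */
};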
@@ -2294,67 +2352,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
 	 * Don't allow programming if the action is a queue greater than
 	 * the number of online Tx queues.
 	 */
-	if ((fs.action >= adapter->num_tx_queues) ||
-	    (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+	if ((fs->action >= adapter->num_tx_queues) ||
+	    (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
 		return -EINVAL;
 
-	memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+	memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
 	memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
 
-	input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
-	input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
-	input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
-	input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
-	input_masks.vlan_id_mask = fs.vlan_tag_mask;
-	/* only use the lowest 2 bytes for flex bytes */
-	input_masks.data_mask = (fs.data_mask & 0xffff);
-
-	switch (fs.flow_type) {
+	/* record flow type */
+	switch (fs->flow_type) {
+	case IPV4_FLOW:
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
+		break;
 	case TCP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
 		break;
 	case UDP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
 		break;
 	case SCTP_V4_FLOW:
-		ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+		input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
 		break;
 	default:
 		return -1;
 	}
 
-	/* Mask bits from the inputs based on user-supplied mask */
-	ixgbe_atr_set_src_ipv4_82599(&input_struct,
-		    (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
-	ixgbe_atr_set_dst_ipv4_82599(&input_struct,
-		    (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
-	/* 82599 expects these to be byte-swapped for perfect filtering */
-	ixgbe_atr_set_src_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
-	ixgbe_atr_set_dst_port_82599(&input_struct,
-	       ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
-
-	/* VLAN and Flex bytes are either completely masked or not */
-	if (!fs.vlan_tag_mask)
-		ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
-
-	if (!input_masks.data_mask)
-		/* make sure we only use the first 2 bytes of user data */
-		ixgbe_atr_set_flex_byte_82599(&input_struct,
-					      (fs.data & 0xffff));
+	/* copy vlan tag minus the CFI bit */
+	if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
+		input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
+		if (!fs->vlan_tag_mask) {
+			input_masks.vlan_id_mask = htons(0xEFFF);
+		} else {
+			switch (~fs->vlan_tag_mask & 0xEFFF) {
+			/* all of these are valid vlan-mask values */
+			case 0xEFFF:
+			case 0xE000:
+			case 0x0FFF:
+			case 0x0000:
+				input_masks.vlan_id_mask =
+					htons(~fs->vlan_tag_mask);
+				break;
+			/* exit with error if vlan-mask is invalid */
+			default:
+				e_err(drv, "Partial VLAN ID or "
+					   "priority mask in vlan-mask is not "
+					   "supported by hardware\n");
+				return -1;
+			}
+		}
+	}
+
+	/* make sure we only use the first 2 bytes of user data */
+	if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
+		input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
+		if (!(fs->data_mask & 0xFFFF)) {
+			input_masks.flex_mask = 0xFFFF;
+		} else if (~fs->data_mask & 0xFFFF) {
+			e_err(drv, "Partial user-def-mask is not "
+				   "supported by hardware\n");
+			return -1;
+		}
+	}
+
+	/*
+	 * Copy input into formatted structures
+	 *
+	 * These assignments are based on the following logic
+	 * If neither input or mask are set assume value is masked out.
+	 * If input is set, but mask is not mask should default to accept all.
+	 * If input is not set, but mask is set then mask likely results in 0.
+	 * If input is set and mask is set then assign both.
+	 */
+	if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
+		input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
+		if (!fs->m_u.tcp_ip4_spec.ip4src)
+			input_masks.src_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.src_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4src;
+	}
+	if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
+		input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
+		if (!fs->m_u.tcp_ip4_spec.ip4dst)
+			input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
+		else
+			input_masks.dst_ip_mask[0] =
+				~fs->m_u.tcp_ip4_spec.ip4dst;
+	}
+	if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
+		input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
+		if (!fs->m_u.tcp_ip4_spec.psrc)
+			input_masks.src_port_mask = 0xFFFF;
+		else
+			input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
+	}
+	if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
+		input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
+		if (!fs->m_u.tcp_ip4_spec.pdst)
+			input_masks.dst_port_mask = 0xFFFF;
+		else
+			input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
+	}
 
 	/* determine if we need to drop or route the packet */
-	if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+	if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
 		target_queue = MAX_RX_QUEUES - 1;
 	else
-		target_queue = fs.action;
+		target_queue = fs->action;
 
 	spin_lock(&adapter->fdir_perfect_lock);
-	ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
-					    &input_masks, 0, target_queue);
+	err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
+						  &input_struct,
+						  &input_masks, 0,
+						  target_queue);
 	spin_unlock(&adapter->fdir_perfect_lock);
 
-	return 0;
+	return err ? -1 : 0;
 }
 
 static const struct ethtool_ops ixgbe_ethtool_ops = {
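Note: the block comment in this hunk ("Copy input into formatted
structures ...") encodes one inversion rule: ethtool ntuple masks are
ignore-masks (set bit = don't care) while the hardware wants compare-masks,
hence the ~ on every assignment. A self-contained sketch of that
normalization for a single 32-bit field:

#include <stdint.h>
#include <stdio.h>

static void normalize(uint32_t value, uint32_t ignore_mask,
		      uint32_t *hw_value, uint32_t *hw_mask)
{
	if (!value && !~ignore_mask)
		return;			/* fully masked out: leave untouched */
	*hw_value = value;
	if (!ignore_mask)
		*hw_mask = 0xFFFFFFFF;	/* no mask given: match every bit */
	else
		*hw_mask = ~ignore_mask; /* ignore-mask -> compare-mask */
}

int main(void)
{
	uint32_t v = 0, m = 0;

	/* "match 192.168.0.x": ethtool says ignore the low byte */
	normalize(0xC0A80001, 0x000000FF, &v, &m);
	printf("value=%08x hw_mask=%08x\n", v, m);	/* hw_mask=ffffff00 */
	return 0;
}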
@@ -2385,7 +2498,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.set_tso = ixgbe_set_tso,
 	.self_test = ixgbe_diag_test,
 	.get_strings = ixgbe_get_strings,
-	.phys_id = ixgbe_phys_id,
+	.set_phys_id = ixgbe_set_phys_id,
 	.get_sset_count = ixgbe_get_sset_count,
 	.get_ethtool_stats = ixgbe_get_ethtool_stats,
 	.get_coalesce = ixgbe_get_coalesce,
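Note: the ops table now wires up the set_phys_id interface, where the
ethtool core owns the identify-blink timer and the driver only switches the
LED. A hand-written sketch of a handler's shape (see the real
ixgbe_set_phys_id in this file for the actual LEDCTL register writes):

static int example_set_phys_id(struct net_device *netdev,
			       enum ethtool_phys_id_state state)
{
	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		/* save current LED state; returning 2 asks the core to
		 * call back with ON/OFF twice per second */
		return 2;
	case ETHTOOL_ID_ON:
		/* drive the identify LED on */
		break;
	case ETHTOOL_ID_OFF:
		/* drive the identify LED off */
		break;
	case ETHTOOL_ID_INACTIVE:
		/* restore the LED state saved at ETHTOOL_ID_ACTIVE */
		break;
	}
	return 0;
}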